diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
deleted file mode 100644
index 4234de160..000000000
--- a/.ci/Jenkinsfile
+++ /dev/null
@@ -1,81 +0,0 @@
-def golang = ['1.23', '1.24']
-def golangDefault = "golang:${golang.last()}"
-
-async {
-
-    for (version in golang) {
-        def go = version
-
-        task("test/go${go}") {
-            container("golang:${go}") {
-                sh 'make test'
-            }
-        }
-
-        task("build/go${go}") {
-            container("golang:${go}") {
-                for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
-                    sh """
-                        make bin/frostfs-${app}
-                        bin/frostfs-${app} --version
-                    """
-                }
-            }
-        }
-    }
-
-    task('test/race') {
-        container(golangDefault) {
-            sh 'make test GOFLAGS="-count=1 -race"'
-        }
-    }
-
-    task('lint') {
-        container(golangDefault) {
-            sh 'make lint-install lint'
-        }
-    }
-
-    task('staticcheck') {
-        container(golangDefault) {
-            sh 'make staticcheck-install staticcheck-run'
-        }
-    }
-
-    task('gopls') {
-        container(golangDefault) {
-            sh 'make gopls-install gopls-run'
-        }
-    }
-
-    task('gofumpt') {
-        container(golangDefault) {
-            sh '''
-                make fumpt-install
-                make fumpt
-                git diff --exit-code --quiet
-            '''
-        }
-    }
-
-    task('vulncheck') {
-        container(golangDefault) {
-            sh '''
-                go install golang.org/x/vuln/cmd/govulncheck@latest
-                govulncheck ./...
-            '''
-        }
-    }
-
-    task('pre-commit') {
-        dockerfile("""
-            FROM ${golangDefault}
-            RUN apt update && \
-                apt install -y --no-install-recommends pre-commit
-            """) {
-            withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
-                sh 'pre-commit run --color=always --hook-stage=manual --all-files'
-            }
-        }
-    }
-}
diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml
index d568b9607..ce2d64dd9 100644
--- a/.forgejo/workflows/build.yml
+++ b/.forgejo/workflows/build.yml
@@ -1,10 +1,6 @@
 name: Build
 
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   build:
@@ -12,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
 
     steps:
       - uses: actions/checkout@v3
diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml
index 190d7764a..7c5af8410 100644
--- a/.forgejo/workflows/dco.yml
+++ b/.forgejo/workflows/dco.yml
@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.22'
 
       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml
deleted file mode 100644
index fe91d65f9..000000000
--- a/.forgejo/workflows/oci-image.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: OCI image
-
-on:
-  push:
-  workflow_dispatch:
-
-jobs:
-  image:
-    name: Build container images
-    runs-on: docker
-    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
-    steps:
-      - name: Clone git repo
-        uses: actions/checkout@v3
-
-      - name: Build OCI image
-        run: make images
-
-      - name: Push image to OCI registry
-        run: |
-          echo "$REGISTRY_PASSWORD" \
-            | docker login --username truecloudlab --password-stdin git.frostfs.info
-          make push-images
-        if: >-
-          startsWith(github.ref, 'refs/tags/v') &&
-          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
-        env:
-          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml
index c2e293175..8b06a2fdf 100644
--- a/.forgejo/workflows/pre-commit.yml
+++ b/.forgejo/workflows/pre-commit.yml
@@ -1,10 +1,5 @@
 name: Pre-commit hooks
-
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   precommit:
@@ -21,7 +16,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.24
+          go-version: 1.23
       - name: Set up Python
         run: |
           apt update
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index f3f5432ce..07ba5c268 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -1,10 +1,5 @@
 name: Tests and linters
-
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   lint:
@@ -16,7 +11,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          cache: true
 
      - name: Install linters
@@ -30,7 +25,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3
@@ -53,7 +48,7 @@
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.22'
          cache: true
 
      - name: Run tests
@@ -68,7 +63,7 @@
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          cache: true
 
      - name: Install staticcheck
@@ -104,7 +99,7 @@
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          cache: true
 
      - name: Install gofumpt
diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml
index bc94792d8..2951a8059 100644
--- a/.forgejo/workflows/vulncheck.yml
+++ b/.forgejo/workflows/vulncheck.yml
@@ -1,10 +1,5 @@
 name: Vulncheck
-
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   vulncheck:
@@ -18,8 +13,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
-          check-latest: true
+          go-version: '1.23'
 
      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/.forgejo/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/bug_report.md
rename to .github/ISSUE_TEMPLATE/bug_report.md
diff --git a/.forgejo/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/config.yml
rename to .github/ISSUE_TEMPLATE/config.yml
diff --git a/.forgejo/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/feature_request.md
rename to .github/ISSUE_TEMPLATE/feature_request.md
diff --git a/.forgejo/logo.svg b/.github/logo.svg
similarity index 100%
rename from .forgejo/logo.svg
rename to .github/logo.svg
diff --git a/.golangci.yml b/.golangci.yml
index e3ec09f60..57e3b4494 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,107 +1,93 @@
-version: "2"
+# This file contains all available configuration options
+# with their default values.
+
+# options for analysis running
 run:
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 20m
+
+  # include test files or not, default is true
   tests: false
+
+# output configuration options
 output:
+  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
   formats:
-    tab:
-      path: stdout
-      colors: false
+    - format: tab
+
+# all available settings of specific linters
+linters-settings:
+  exhaustive:
+    # indicates that switch statements are to be considered exhaustive if a
+    # 'default' case is present, even if all enum members aren't listed in the
+    # switch
+    default-signifies-exhaustive: true
+  govet:
+    # report about shadowed variables
+    check-shadowing: false
+  staticcheck:
+    checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
+  funlen:
+    lines: 80 # default 60
+    statements: 60 # default 40
+  gocognit:
+    min-complexity: 40 # default 30
+  importas:
+    no-unaliased: true
+    no-extra-aliases: false
+    alias:
+      pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+      alias: objectSDK
+  unused:
+    field-writes-are-uses: false
+    exported-fields-are-used: false
+    local-variables-are-used: false
+  custom:
+    truecloudlab-linters:
+      path: bin/linters/external_linters.so
+      original-url: git.frostfs.info/TrueCloudLab/linters.git
+      settings:
+        noliteral:
+          target-methods : ["reportFlushError", "reportError"]
+          disable-packages: ["codes", "err", "res","exec"]
+          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+
 linters:
-  default: none
   enable:
-    - bidichk
-    - containedctx
-    - contextcheck
-    - copyloopvar
-    - durationcheck
-    - errcheck
-    - exhaustive
-    - funlen
-    - gocognit
-    - gocritic
-    - godot
-    - importas
-    - ineffassign
-    - intrange
-    - misspell
-    - perfsprint
-    - predeclared
-    - protogetter
-    - reassign
+    # mandatory linters
+    - govet
     - revive
+
+    # some default golangci-lint linters
+    - errcheck
+    - gosimple
+    - godot
+    - ineffassign
     - staticcheck
-    - testifylint
-    - truecloudlab-linters
-    - unconvert
-    - unparam
+    - typecheck
     - unused
-    - usetesting
-    - whitespace
-  settings:
-    exhaustive:
-      default-signifies-exhaustive: true
-    funlen:
-      lines: 80
-      statements: 60
-    gocognit:
-      min-complexity: 40
-    gocritic:
-      disabled-checks:
-        - ifElseChain
-    importas:
-      alias:
-        - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
-          alias: objectSDK
-      no-unaliased: true
-      no-extra-aliases: false
-    staticcheck:
-      checks:
-        - all
-        - -QF1002
-    unused:
-      field-writes-are-uses: false
-      exported-fields-are-used: false
-      local-variables-are-used: false
-    custom:
-      truecloudlab-linters:
-        path: bin/linters/external_linters.so
-        original-url: git.frostfs.info/TrueCloudLab/linters.git
-        settings:
-          noliteral:
-            constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
-            disable-packages:
-              - codes
-              - err
-              - res
-              - exec
-            target-methods:
-              - reportFlushError
-              - reportError
-  exclusions:
-    generated: lax
-    presets:
-      - comments
-      - common-false-positives
-      - legacy
-      - std-error-handling
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
-formatters:
-  enable:
-    - gci
+
+    # extra linters
+    - bidichk
+    - durationcheck
+    - exhaustive
+    - copyloopvar
     - gofmt
     - goimports
-  settings:
-    gci:
-      sections:
-        - standard
-        - default
-      custom-order: true
-  exclusions:
-    generated: lax
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
+    - misspell
+    - predeclared
+    - reassign
+    - whitespace
+    - containedctx
+    - funlen
+    - gocognit
+    - contextcheck
+    - importas
+    - truecloudlab-linters
+    - perfsprint
+    - testifylint
+    - protogetter
+    - intrange
+    - tenv
+  disable-all: true
+  fast: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 92c84ab16..e4ba6a5d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,30 +9,6 @@ Changelog for FrostFS Node
 ### Removed
 ### Updated
 
-## [v0.44.0] - 2024-25-11 - Rongbuk
-
-### Added
-- Allow to prioritize nodes during GET traversal via attributes (#1439)
-- Add metrics for the frostfsid cache (#1464)
-- Customize constant attributes attached to every tracing span (#1488)
-- Manage additional keys in the `frostfsid` contract (#1505)
-- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519)
-
-### Changed
-- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396)
-- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515)
-
-### Fixed
-- Fix EC object search (#1408)
-- Fix EC object put when one of the nodes is unavailable (#1427)
-
-### Removed
-- Drop most of the eACL-related code (#1425)
-- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483)
-
-### Upgrading from v0.43.0
-The metabase schema has changed completely, resync is required.
-
 ## [v0.42.0]
 
 ### Added
diff --git a/CODEOWNERS b/CODEOWNERS
deleted file mode 100644
index d19c96a5c..000000000
--- a/CODEOWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
-.forgejo/.* @potyarkin
-Makefile @potyarkin
diff --git a/Makefile b/Makefile
index 575eaae6f..68a31febe 100755
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,5 @@
 #!/usr/bin/make -f
 SHELL = bash
-.SHELLFLAGS = -euo pipefail -c
 
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -8,16 +7,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
-GO_VERSION ?= 1.23
-LINT_VERSION ?= 2.0.2
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
+GO_VERSION ?= 1.22
+LINT_VERSION ?= 1.61.0
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
 PROTOC_OS_VERSION=osx-x86_64
 ifeq ($(shell uname), Linux)
   PROTOC_OS_VERSION=linux-x86_64
 endif
-STATICCHECK_VERSION ?= 2025.1.1
+STATICCHECK_VERSION ?= 2024.1.1
 ARCH = amd64
 
 BIN = bin
@@ -43,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0
 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
 GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
 
-GOPLS_VERSION ?= v0.17.1
+GOPLS_VERSION ?= v0.15.1
 GOPLS_DIR ?= $(abspath $(BIN))/gopls
 GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
 GOPLS_TEMP_FILE := $(shell mktemp)
@@ -116,7 +115,7 @@ protoc:
 # Install protoc
 protoc-install:
 	@rm -rf $(PROTOBUF_DIR)
-	@mkdir -p $(PROTOBUF_DIR)
+	@mkdir $(PROTOBUF_DIR)
 	@echo "⇒ Installing protoc... "
 	@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
 	@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@@ -140,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
 # Build dirty local Docker images
 dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
 
-# Push FrostFS components' docker image to the registry
-push-image-%:
-	@echo "⇒ Publish FrostFS $* docker image "
-	@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
-
 # Run `make %` in Golang container
 docker/%:
 	docker run --rm -t \
@@ -170,7 +160,7 @@ imports:
 # Install gofumpt
 fumpt-install:
 	@rm -rf $(GOFUMPT_DIR)
-	@mkdir -p $(GOFUMPT_DIR)
+	@mkdir $(GOFUMPT_DIR)
 	@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
 
 # Run gofumpt
@@ -187,44 +177,21 @@ test:
 	@echo "⇒ Running go test"
 	@GOFLAGS="$(GOFLAGS)" go test ./...
 
-# Install Gerrit commit-msg hook
-review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
-review-install:
-	@git config remote.review.url \
-		|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
-	@mkdir -p $(GIT_HOOK_DIR)/
-	@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/commit-msg
-	@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
-
-# Create a PR in Gerrit
-review: BRANCH ?= master
-review:
-	@git push review HEAD:refs/for/$(BRANCH) \
-		--push-option r=e.stratonikov@yadro.com \
-		--push-option r=d.stepanov@yadro.com \
-		--push-option r=an.nikiforov@yadro.com \
-		--push-option r=a.arifullin@yadro.com \
-		--push-option r=ekaterina.lebedeva@yadro.com \
-		--push-option r=a.savchuk@yadro.com \
-		--push-option r=a.chuprov@yadro.com
-
 # Run pre-commit
 pre-commit-run:
 	@pre-commit run -a --hook-stage manual
 
 # Install linters
-lint-install: $(BIN)
+lint-install:
 	@rm -rf $(OUTPUT_LINT_DIR)
-	@mkdir -p $(OUTPUT_LINT_DIR)
+	@mkdir $(OUTPUT_LINT_DIR)
 	@mkdir -p $(TMP_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
 	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@rmdir $(TMP_DIR) 2>/dev/null || true
-	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION)
+	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
 
 # Run linters
 lint:
@@ -236,7 +203,7 @@ lint:
 # Install staticcheck
 staticcheck-install:
 	@rm -rf $(STATICCHECK_DIR)
-	@mkdir -p $(STATICCHECK_DIR)
+	@mkdir $(STATICCHECK_DIR)
 	@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
 
 # Run staticcheck
@@ -249,7 +216,7 @@ staticcheck-run:
 # Install gopls
 gopls-install:
 	@rm -rf $(GOPLS_DIR)
-	@mkdir -p $(GOPLS_DIR)
+	@mkdir $(GOPLS_DIR)
 	@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
 
 # Run gopls
@@ -303,12 +270,10 @@ env-up: all
 		echo "Frostfs contracts not found"; exit 1; \
 	fi
 	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
-	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
-		--storage-wallet ./dev/storage/wallet01.json \
-		--storage-wallet ./dev/storage/wallet02.json \
-		--storage-wallet ./dev/storage/wallet03.json \
-		--storage-wallet ./dev/storage/wallet04.json
-
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
 	@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
 		make locode-download; \
 	fi
@@ -317,6 +282,7 @@ env-up: all
 
 # Shutdown dev environment
 env-down:
-	docker compose -f dev/docker-compose.yml down -v
+	docker compose -f dev/docker-compose.yml down
+	docker volume rm -f frostfs-node_neo-go
 	rm -rf ./$(TMP_DIR)/state
 	rm -rf ./$(TMP_DIR)/storage
diff --git a/README.md b/README.md
index 0109ed0e5..47d812b18 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
+<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
@@ -98,7 +98,7 @@ See `frostfs-contract`'s README.md for build instructions.
4. To create a container and put an object into it, run (container and object IDs will be different):
```
-./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await
+./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --basic-acl public-read-write --await
Enter password > <- press ENTER, there is no password for the wallet
CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
```
diff --git a/VERSION b/VERSION
index 9052dab96..01efe7f3a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v0.44.0
+v0.42.0
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index f194e97f5..81395edb0 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -16,18 +16,10 @@ const (
EndpointFlagDesc = "N3 RPC node endpoint"
EndpointFlagShort = "r"
- WalletPath = "wallet"
- WalletPathShorthand = "w"
- WalletPathUsage = "Path to the wallet"
-
AlphabetWalletsFlag = "alphabet-wallets"
AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
- AdminWalletPath = "wallet-admin"
- AdminWalletUsage = "Path to the admin wallet"
-
LocalDumpFlag = "local-dump"
- ProtoConfigPath = "protocol"
ContractsInitFlag = "contracts"
ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)"
ContractsURLFlag = "contracts-url"
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go
deleted file mode 100644
index d67b70d2a..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/root.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package maintenance
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
- "github.com/spf13/cobra"
-)
-
-var RootCmd = &cobra.Command{
- Use: "maintenance",
- Short: "Section for maintenance commands",
-}
-
-func init() {
- RootCmd.AddCommand(zombie.Cmd)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
deleted file mode 100644
index 1b66889aa..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package zombie
-
-import (
- "crypto/ecdsa"
- "fmt"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/cli/flags"
- "github.com/nspcc-dev/neo-go/cli/input"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
- keyDesc := viper.GetString(walletFlag)
- if keyDesc == "" {
- return &nodeconfig.Key(appCfg).PrivateKey
- }
- data, err := os.ReadFile(keyDesc)
- commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
-
- priv, err := keys.NewPrivateKeyFromBytes(data)
- if err != nil {
- w, err := wallet.NewWalletFromFile(keyDesc)
- commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
- return fromWallet(cmd, w, viper.GetString(addressFlag))
- }
- return &priv.PrivateKey
-}
-
-func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
- var (
- addr util.Uint160
- err error
- )
-
- if addrStr == "" {
- addr = w.GetChangeAddress()
- } else {
- addr, err = flags.ParseAddress(addrStr)
- commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
- }
-
- acc := w.GetAccount(addr)
- if acc == nil {
- commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
- }
-
- pass, err := getPassword()
- commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
-
- commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
-
- return &acc.PrivateKey().PrivateKey
-}
-
-func getPassword() (string, error) {
- // this check allows empty passwords
- if viper.IsSet("password") {
- return viper.GetString("password"), nil
- }
-
- return input.ReadPassword("Enter password > ")
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
deleted file mode 100644
index f73f33db9..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package zombie
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func list(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- var containerID *cid.ID
- if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
- containerID = &cid.ID{}
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
- }
-
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
- if containerID != nil && a.Container() != *containerID {
- return nil
- }
- cmd.Println(a.EncodeToString())
- return nil
- }))
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
deleted file mode 100644
index cd3a64499..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package zombie
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "github.com/spf13/cobra"
-)
-
-func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
- addresses := morphconfig.RPCEndpoint(appCfg)
- if len(addresses) == 0 {
- commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
- }
- key := nodeconfig.Key(appCfg)
- cli, err := client.New(cmd.Context(),
- key,
- client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
- client.WithEndpoints(addresses...),
- client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
- )
- commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
- return cli
-}
-
-func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
- hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
- commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
- cc, err := cntClient.NewFromMorph(morph, hs, 0)
- commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
- return cc
-}
-
-func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
- hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
- commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
- cli, err := netmapClient.NewFromMorph(morph, hs, 0)
- commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
- return cli
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
deleted file mode 100644
index 27f83aec7..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package zombie
-
-import (
- "context"
- "fmt"
- "math"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-type quarantine struct {
- // mtx protects current field.
- mtx sync.Mutex
- current int
- trees []*fstree.FSTree
-}
-
-func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
- var paths []string
- for _, sh := range engineInfo.Shards {
- var storagePaths []string
- for _, st := range sh.BlobStorInfo.SubStorages {
- storagePaths = append(storagePaths, st.Path)
- }
- if len(storagePaths) == 0 {
- continue
- }
- paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
- }
- q, err := newQuarantine(paths)
- commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
- return q
-}
-
-func commonPath(paths []string) string {
- if len(paths) == 0 {
- return ""
- }
- if len(paths) == 1 {
- return paths[0]
- }
- minLen := math.MaxInt
- for _, p := range paths {
- if len(p) < minLen {
- minLen = len(p)
- }
- }
-
- var sb strings.Builder
- for i := range minLen {
- for _, path := range paths[1:] {
- if paths[0][i] != path[i] {
- return sb.String()
- }
- }
- sb.WriteByte(paths[0][i])
- }
- return sb.String()
-}
-
-func newQuarantine(paths []string) (*quarantine, error) {
- var q quarantine
- for i := range paths {
- f := fstree.New(
- fstree.WithDepth(1),
- fstree.WithDirNameLen(1),
- fstree.WithPath(paths[i]),
- fstree.WithPerm(os.ModePerm),
- )
- if err := f.Open(mode.ComponentReadWrite); err != nil {
- return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
- }
- if err := f.Init(); err != nil {
- return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
- }
- q.trees = append(q.trees, f)
- }
- return &q, nil
-}
-
-func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
- for i := range q.trees {
- res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
- if err != nil {
- continue
- }
- return res.Object, nil
- }
- return nil, &apistatus.ObjectNotFound{}
-}
-
-func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
- for i := range q.trees {
- _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
- if err != nil {
- continue
- }
- return nil
- }
- return &apistatus.ObjectNotFound{}
-}
-
-func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
- data, err := obj.Marshal()
- if err != nil {
- return err
- }
-
- var prm common.PutPrm
- prm.Address = objectcore.AddressOf(obj)
- prm.Object = obj
- prm.RawData = data
-
- q.mtx.Lock()
- current := q.current
- q.current = (q.current + 1) % len(q.trees)
- q.mtx.Unlock()
-
- _, err = q.trees[current].Put(ctx, prm)
- return err
-}
-
-func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
- var prm common.IteratePrm
- prm.Handler = func(elem common.IterationElement) error {
- return f(elem.Address)
- }
- for i := range q.trees {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- _, err := q.trees[i].Iterate(ctx, prm)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
deleted file mode 100644
index 0b8f2f172..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package zombie
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func remove(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
-
- var containerID cid.ID
- cidStr, _ := cmd.Flags().GetString(cidFlag)
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
-
- var objectID *oid.ID
- oidStr, _ := cmd.Flags().GetString(oidFlag)
- if oidStr != "" {
- objectID = &oid.ID{}
- commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
- }
-
- if objectID != nil {
- var addr oid.Address
- addr.SetContainer(containerID)
- addr.SetObject(*objectID)
- removeObject(cmd, q, addr)
- } else {
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
- if addr.Container() != containerID {
- return nil
- }
- removeObject(cmd, q, addr)
- return nil
- }))
- }
-}
-
-func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
- err := q.Delete(cmd.Context(), addr)
- if errors.Is(err, new(apistatus.ObjectNotFound)) {
- return
- }
- commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
deleted file mode 100644
index f179c7c2d..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package zombie
-
-import (
- "crypto/sha256"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func restore(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- morphClient := createMorphClient(cmd, appCfg)
- cnrCli := createContainerClient(cmd, morphClient)
-
- var containerID cid.ID
- cidStr, _ := cmd.Flags().GetString(cidFlag)
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
-
- var objectID *oid.ID
- oidStr, _ := cmd.Flags().GetString(oidFlag)
- if oidStr != "" {
- objectID = &oid.ID{}
- commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
- }
-
- if objectID != nil {
- var addr oid.Address
- addr.SetContainer(containerID)
- addr.SetObject(*objectID)
- restoreObject(cmd, storageEngine, q, addr, cnrCli)
- } else {
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
- if addr.Container() != containerID {
- return nil
- }
- restoreObject(cmd, storageEngine, q, addr, cnrCli)
- return nil
- }))
- }
-}
-
-func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
- obj, err := q.Get(cmd.Context(), addr)
- commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
- rawCID := make([]byte, sha256.Size)
-
- cid := addr.Container()
- cid.Encode(rawCID)
- cnr, err := cnrCli.Get(cmd.Context(), rawCID)
- commonCmd.ExitOnErr(cmd, "get container: %w", err)
-
- putPrm := engine.PutPrm{
- Object: obj,
- IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
- }
- commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
- commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
deleted file mode 100644
index c8fd9e5e5..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package zombie
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- flagBatchSize = "batch-size"
- flagBatchSizeUsage = "Objects iteration batch size"
- cidFlag = "cid"
- cidFlagUsage = "Container ID"
- oidFlag = "oid"
- oidFlagUsage = "Object ID"
- walletFlag = "wallet"
- walletFlagShorthand = "w"
- walletFlagUsage = "Path to the wallet or binary key"
- addressFlag = "address"
- addressFlagUsage = "Address of wallet account"
- moveFlag = "move"
- moveFlagUsage = "Move objects from storage engine to quarantine"
-)
-
-var (
- Cmd = &cobra.Command{
- Use: "zombie",
- Short: "Zombie objects related commands",
- }
- scanCmd = &cobra.Command{
- Use: "scan",
- Short: "Scan storage engine for zombie objects and move them to quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
- _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
- _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
- _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
- },
- Run: scan,
- }
- listCmd = &cobra.Command{
- Use: "list",
- Short: "List zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- },
- Run: list,
- }
- restoreCmd = &cobra.Command{
- Use: "restore",
- Short: "Restore zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
- },
- Run: restore,
- }
- removeCmd = &cobra.Command{
- Use: "remove",
- Short: "Remove zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
- },
- Run: remove,
- }
-)
-
-func init() {
- initScanCmd()
- initListCmd()
- initRestoreCmd()
- initRemoveCmd()
-}
-
-func initScanCmd() {
- Cmd.AddCommand(scanCmd)
-
- scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
- scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
- scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
- scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
-}
-
-func initListCmd() {
- Cmd.AddCommand(listCmd)
-
- listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- listCmd.Flags().String(cidFlag, "", cidFlagUsage)
-}
-
-func initRestoreCmd() {
- Cmd.AddCommand(restoreCmd)
-
- restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
- restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
-
-func initRemoveCmd() {
- Cmd.AddCommand(removeCmd)
-
- removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
- removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
deleted file mode 100644
index 268ec4911..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package zombie
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/sha256"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
- clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
- "golang.org/x/sync/errgroup"
-)
-
-func scan(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
- if batchSize == 0 {
- commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
- }
- move, _ := cmd.Flags().GetBool(moveFlag)
-
- storageEngine := newEngine(cmd, appCfg)
- morphClient := createMorphClient(cmd, appCfg)
- cnrCli := createContainerClient(cmd, morphClient)
- nmCli := createNetmapClient(cmd, morphClient)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- pk := getPrivateKey(cmd, appCfg)
-
- epoch, err := nmCli.Epoch(cmd.Context())
- commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
-
- nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
- commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
-
- cmd.Printf("Epoch: %d\n", nm.Epoch())
- cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
-
- ps := &processStatus{
- statusCount: make(map[status]uint64),
- }
-
- stopCh := make(chan struct{})
- start := time.Now()
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- defer wg.Done()
- tick := time.NewTicker(time.Second)
- defer tick.Stop()
- for {
- select {
- case <-cmd.Context().Done():
- return
- case <-stopCh:
- return
- case <-tick.C:
- fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
- }
- }
- }()
- go func() {
- defer wg.Done()
- err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
- close(stopCh)
- }()
- wg.Wait()
- commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
-
- cmd.Println()
- cmd.Println("Status description:")
- cmd.Println("undefined -- nothing is clear")
- cmd.Println("found -- object is found in cluster")
- cmd.Println("quarantine -- object is not found in cluster")
- cmd.Println()
- for status, count := range ps.statusCount {
- cmd.Printf("Status: %s, Count: %d\n", status, count)
- }
-}
-
-type status string
-
-const (
- statusUndefined status = "undefined"
- statusFound status = "found"
- statusQuarantine status = "quarantine"
-)
-
-func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
- rawCID := make([]byte, sha256.Size)
- cid := obj.Address.Container()
- cid.Encode(rawCID)
-
- cnr, err := cnrCli.Get(ctx, rawCID)
- if err != nil {
- var errContainerNotFound *apistatus.ContainerNotFound
- if errors.As(err, &errContainerNotFound) {
- // Policer will deal with this object.
- return statusFound, nil
- }
- return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
- }
- nm, err := nmCli.NetMap(ctx)
- if err != nil {
- return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
- }
-
- nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
- if err != nil {
- // Not enough nodes, check all netmap nodes.
- nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
- }
-
- objID := obj.Address.Object()
- cnrID := obj.Address.Container()
- local := true
- raw := false
- if obj.ECInfo != nil {
- objID = obj.ECInfo.ParentID
- local = false
- raw = true
- }
- prm := clientSDK.PrmObjectHead{
- ObjectID: &objID,
- ContainerID: &cnrID,
- Local: local,
- Raw: raw,
- }
-
- var ni clientCore.NodeInfo
- for i := range nodes {
- for j := range nodes[i] {
- if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
- return statusUndefined, fmt.Errorf("parse node info: %w", err)
- }
- c, err := cc.Get(ni)
- if err != nil {
- continue
- }
- res, err := c.ObjectHead(ctx, prm)
- if err != nil {
- var errECInfo *objectSDK.ECInfoError
- if raw && errors.As(err, &errECInfo) {
- return statusFound, nil
- }
- continue
- }
- if err := apistatus.ErrFromStatus(res.Status()); err != nil {
- continue
- }
- return statusFound, nil
- }
- }
-
- if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
- return statusFound, nil
- }
- return statusQuarantine, nil
-}
-
-func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
- appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
-) error {
- cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
- DialTimeout: apiclientconfig.DialTimeout(appCfg),
- StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
- ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
- Key: pk,
- AllowExternal: apiclientconfig.AllowExternal(appCfg),
- })
- ctx := cmd.Context()
-
- var cursor *engine.Cursor
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var prm engine.ListWithCursorPrm
- prm.WithCursor(cursor)
- prm.WithCount(batchSize)
-
- res, err := storageEngine.ListWithCursor(ctx, prm)
- if err != nil {
- if errors.Is(err, engine.ErrEndOfListing) {
- return nil
- }
- return fmt.Errorf("list with cursor: %w", err)
- }
-
- cursor = res.Cursor()
- addrList := res.AddressList()
- eg, egCtx := errgroup.WithContext(ctx)
- eg.SetLimit(int(batchSize))
-
- for i := range addrList {
- addr := addrList[i]
- eg.Go(func() error {
- result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
- if err != nil {
- return fmt.Errorf("check object %s status: %w", addr.Address, err)
- }
- ps.add(result)
-
- if !move && result == statusQuarantine {
- cmd.Println(addr)
- return nil
- }
-
- if result == statusQuarantine {
- return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
- }
- return nil
- })
- }
- if err := eg.Wait(); err != nil {
- return fmt.Errorf("process objects batch: %w", err)
- }
- }
-}
-
-func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
- var getPrm engine.GetPrm
- getPrm.WithAddress(addr)
- res, err := storageEngine.Get(ctx, getPrm)
- if err != nil {
- return fmt.Errorf("get object %s from storage engine: %w", addr, err)
- }
-
- if err := q.Put(ctx, res.Object()); err != nil {
- return fmt.Errorf("put object %s to quarantine: %w", addr, err)
- }
-
- var delPrm engine.DeletePrm
- delPrm.WithForceRemoval()
- delPrm.WithAddress(addr)
-
- if err = storageEngine.Delete(ctx, delPrm); err != nil {
- return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
- }
- return nil
-}
-
-type processStatus struct {
- guard sync.RWMutex
- statusCount map[status]uint64
- count uint64
-}
-
-func (s *processStatus) add(st status) {
- s.guard.Lock()
- defer s.guard.Unlock()
- s.statusCount[st]++
- s.count++
-}
-
-func (s *processStatus) total() uint64 {
- s.guard.RLock()
- defer s.guard.RUnlock()
- return s.count
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
deleted file mode 100644
index 5be34d502..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package zombie
-
-import (
- "context"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
- shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
- blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
- fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/panjf2000/ants/v2"
- "github.com/spf13/cobra"
- "go.etcd.io/bbolt"
- "go.uber.org/zap"
-)
-
-func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
- ngOpts := storageEngineOptions(c)
- shardOpts := shardOptions(cmd, c)
- e := engine.New(ngOpts...)
- for _, opts := range shardOpts {
- _, err := e.AddShard(cmd.Context(), opts...)
- commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
- }
- commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
- commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
- return e
-}
-
-func storageEngineOptions(c *config.Config) []engine.Option {
- return []engine.Option{
- engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
- engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
- }
-}
-
-func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
- var result [][]shard.Option
- err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
- result = append(result, getShardOpts(cmd, c, sh))
- return nil
- })
- commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
- return result
-}
-
-func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
- wc, wcEnabled := getWriteCacheOpts(sh)
- return []shard.Option{
- shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- shard.WithRefillMetabase(sh.RefillMetabase()),
- shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
- shard.WithMode(sh.Mode()),
- shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
- shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
- shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
- shard.WithWriteCache(wcEnabled),
- shard.WithWriteCacheOptions(wc),
- shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
- shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
- shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
- shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
- return pool
- }),
- shard.WithLimiter(qos.NewNoopLimiter()),
- }
-}
-
-func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
- if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
- var result []writecache.Option
- result = append(result,
- writecache.WithPath(wc.Path()),
- writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
- writecache.WithMaxObjectSize(wc.MaxObjectSize()),
- writecache.WithFlushWorkersCount(wc.WorkerCount()),
- writecache.WithMaxCacheSize(wc.SizeLimit()),
- writecache.WithMaxCacheCount(wc.CountLimit()),
- writecache.WithNoSync(wc.NoSync()),
- writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- writecache.WithQoSLimiter(qos.NewNoopLimiter()),
- )
- return result, true
- }
- return nil, false
-}
-
-func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
- var piloramaOpts []pilorama.Option
- if config.BoolSafe(c.Sub("tree"), "enabled") {
- pr := sh.Pilorama()
- piloramaOpts = append(piloramaOpts,
- pilorama.WithPath(pr.Path()),
- pilorama.WithPerm(pr.Perm()),
- pilorama.WithNoSync(pr.NoSync()),
- pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
- pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
- )
- }
- return piloramaOpts
-}
-
-func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
- return []meta.Option{
- meta.WithPath(sh.Metabase().Path()),
- meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
- meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
- meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
- meta.WithBoltDBOptions(&bbolt.Options{
- Timeout: 100 * time.Millisecond,
- }),
- meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- meta.WithEpochState(&epochState{}),
- }
-}
-
-func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
- result := []blobstor.Option{
- blobstor.WithCompression(sh.Compression()),
- blobstor.WithStorages(getSubStorages(ctx, sh)),
- blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- }
-
- return result
-}
-
-func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
- var ss []blobstor.SubStorage
- for _, storage := range sh.BlobStor().Storages() {
- switch storage.Type() {
- case blobovniczatree.Type:
- sub := blobovniczaconfig.From((*config.Config)(storage))
- blobTreeOpts := []blobovniczatree.Option{
- blobovniczatree.WithRootPath(storage.Path()),
- blobovniczatree.WithPermissions(storage.Perm()),
- blobovniczatree.WithBlobovniczaSize(sub.Size()),
- blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
- blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
- blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
- blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
- blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
- blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
- blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
- blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())),
- blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())),
- blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
- }
-
- ss = append(ss, blobstor.SubStorage{
- Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
- Policy: func(_ *objectSDK.Object, data []byte) bool {
- return uint64(len(data)) < sh.SmallSizeLimit()
- },
- })
- case fstree.Type:
- sub := fstreeconfig.From((*config.Config)(storage))
- fstreeOpts := []fstree.Option{
- fstree.WithPath(storage.Path()),
- fstree.WithPerm(storage.Perm()),
- fstree.WithDepth(sub.Depth()),
- fstree.WithNoSync(sub.NoSync()),
- fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- }
-
- ss = append(ss, blobstor.SubStorage{
- Storage: fstree.New(fstreeOpts...),
- Policy: func(_ *objectSDK.Object, _ []byte) bool {
- return true
- },
- })
- default:
- // should never happen, that has already
- // been handled: when the config was read
- }
- }
- return ss
-}
-
-type epochState struct{}
-
-func (epochState) CurrentEpoch() uint64 {
- return 0
-}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index c0c290c5e..00b30c9b2 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -28,7 +28,6 @@ const (
var (
errNoPathsFound = errors.New("no metabase paths found")
errNoMorphEndpointsFound = errors.New("no morph endpoints found")
- errUpgradeFailed = errors.New("upgrade failed")
)
var UpgradeCmd = &cobra.Command{
@@ -92,19 +91,14 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil {
return err
}
- allSuccess := true
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
- allSuccess = false
}
}
- if allSuccess {
- return nil
- }
- return errUpgradeFailed
+ return nil
}
func getMetabasePaths(appCfg *config.Config) ([]string, error) {
@@ -141,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
if err != nil {
return nil, fmt.Errorf("resolve container contract hash: %w", err)
}
- cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
+ cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
if err != nil {
return nil, fmt.Errorf("create morph container client: %w", err)
}
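
The `upgrade` hunk above relaxes the command's exit behavior: the old code returned `errUpgradeFailed` if any metabase failed, while the new code only prints a per-path summary and returns nil. A sketch of the underlying aggregation pattern, with a hypothetical `upgradeOne` standing in for the real per-path upgrade (per-iteration loop variables assume Go >= 1.22, matching this repo's build matrix):

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// upgradeOne is a hypothetical stand-in for the real per-path upgrade.
func upgradeOne(path string) bool { return path != "" }

func upgradeAll(paths []string) {
	var (
		mu     sync.Mutex
		result = make(map[string]bool, len(paths))
		eg     errgroup.Group
	)
	for _, p := range paths {
		eg.Go(func() error {
			ok := upgradeOne(p)
			mu.Lock()
			result[p] = ok
			mu.Unlock()
			return nil // status is reported via result, not as an error
		})
	}
	_ = eg.Wait() // workers never return errors in this sketch
	for mb, ok := range result {
		if ok {
			fmt.Println(mb, ": success")
		} else {
			fmt.Println(mb, ": failed")
		}
	}
}

func main() { upgradeAll([]string{"/srv/meta0", "/srv/meta1", ""}) }
```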
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
index 1960faab4..077e03737 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
@@ -5,19 +5,35 @@ import (
"encoding/json"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
- jsonFlag = "json"
- jsonFlagDesc = "Output rule chains in JSON format"
- addrAdminFlag = "addr"
- addrAdminDesc = "The address of the admins wallet"
+ namespaceTarget = "namespace"
+ containerTarget = "container"
+ userTarget = "user"
+ groupTarget = "group"
+ jsonFlag = "json"
+ jsonFlagDesc = "Output rule chains in JSON format"
+ chainIDFlag = "chain-id"
+ chainIDDesc = "Rule chain ID"
+ ruleFlag = "rule"
+ ruleFlagDesc = "Rule chain in text format"
+ pathFlag = "path"
+ pathFlagDesc = "Path to encoded chain in JSON or binary format"
+ targetNameFlag = "target-name"
+ targetNameDesc = "Resource name in APE resource name format"
+ targetTypeFlag = "target-type"
+ targetTypeDesc = "Resource type (container/namespace)"
+ addrAdminFlag = "addr"
+ addrAdminDesc = "The address of the admins wallet"
+ chainNameFlag = "chain-name"
+ chainNameFlagDesc = "Chain name (ingress|s3)"
)
var (
@@ -85,17 +101,17 @@ func initAddRuleChainCmd() {
addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
+ addRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(targetTypeFlag)
+ addRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(targetNameFlag)
- addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
- addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
- addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag)
+ addRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(chainIDFlag)
+ addRuleChainCmd.Flags().StringArray(ruleFlag, []string{}, ruleFlagDesc)
+ addRuleChainCmd.Flags().String(pathFlag, "", pathFlagDesc)
+ addRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
+ addRuleChainCmd.MarkFlagsMutuallyExclusive(ruleFlag, pathFlag)
}
func initRemoveRuleChainCmd() {
@@ -104,25 +120,26 @@ func initRemoveRuleChainCmd() {
removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
- removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
+ removeRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(targetTypeFlag)
+ removeRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(targetNameFlag)
+ removeRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
+ removeRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target")
- removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag)
+ removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, chainIDFlag)
}
func initListRuleChainsCmd() {
Cmd.AddCommand(listRuleChainsCmd)
listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
- _ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ listRuleChainsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
+ _ = listRuleChainsCmd.MarkFlagRequired(targetTypeFlag)
+ listRuleChainsCmd.Flags().String(targetNameFlag, "", targetNameDesc)
+ _ = listRuleChainsCmd.MarkFlagRequired(targetNameFlag)
listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc)
- listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
+ listRuleChainsCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
}
func initSetAdminCmd() {
@@ -144,15 +161,15 @@ func initListTargetsCmd() {
Cmd.AddCommand(listTargetsCmd)
listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
- _ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ listTargetsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
+ _ = listTargetsCmd.MarkFlagRequired(targetTypeFlag)
}
func addRuleChain(cmd *cobra.Command, _ []string) {
- chain := apeCmd.ParseChain(cmd)
+ chain := parseChain(cmd)
target := parseTarget(cmd)
pci, ac := newPolicyContractInterface(cmd)
- h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain)
+ h, vub, err := pci.AddMorphRuleChain(parseChainName(cmd), target, chain)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err)
@@ -164,14 +181,14 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
pci, ac := newPolicyContractInterface(cmd)
removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag)
if removeAll {
- h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target)
+ h, vub, err := pci.RemoveMorphRuleChainsByTarget(parseChainName(cmd), target)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
cmd.Println("All chains for target removed successfully")
} else {
- chainID := apeCmd.ParseChainID(cmd)
- h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID)
+ chainID := parseChainID(cmd)
+ h, vub, err := pci.RemoveMorphRuleChain(parseChainName(cmd), target, chainID)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
@@ -182,7 +199,7 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
func listRuleChains(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
pci, _ := newPolicyContractReaderInterface(cmd)
- chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target)
+ chains, err := pci.ListMorphRuleChains(parseChainName(cmd), target)
commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err)
if len(chains) == 0 {
return
@@ -193,14 +210,14 @@ func listRuleChains(cmd *cobra.Command, _ []string) {
prettyJSONFormat(cmd, chains)
} else {
for _, c := range chains {
- apeCmd.PrintHumanReadableAPEChain(cmd, c)
+ parseutil.PrintHumanReadableAPEChain(cmd, c)
}
}
}
func setAdmin(cmd *cobra.Command, _ []string) {
s, _ := cmd.Flags().GetString(addrAdminFlag)
- addr, err := address.StringToUint160(s)
+ addr, err := util.Uint160DecodeStringLE(s)
commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
pci, ac := newPolicyContractInterface(cmd)
h, vub, err := pci.SetAdmin(addr)
@@ -214,11 +231,12 @@ func getAdmin(cmd *cobra.Command, _ []string) {
pci, _ := newPolicyContractReaderInterface(cmd)
addr, err := pci.GetAdmin()
commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
- cmd.Println(address.Uint160ToString(addr))
+ cmd.Println(addr.StringLE())
}
func listTargets(cmd *cobra.Command, _ []string) {
- typ := apeCmd.ParseTargetType(cmd)
+ typ, err := parseTargetType(cmd)
+ commonCmd.ExitOnErr(cmd, "parse target type error: %w", err)
pci, inv := newPolicyContractReaderInterface(cmd)
sid, it, err := pci.ListTargetsIterator(typ)
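
The flag blocks above lean on two cobra features: required flags and mutually exclusive groups (`--rule` vs `--path` as alternative ways to supply a chain). A self-contained sketch of the same wiring, using the literal flag names from the constants above:

```go
package main

import "github.com/spf13/cobra"

// newAddRuleChainCmd shows the flag wiring pattern used above: --chain-id
// is required, while --rule and --path are alternative inputs and thus
// registered as mutually exclusive.
func newAddRuleChainCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use: "add-rule-chain",
		Run: func(cmd *cobra.Command, _ []string) {
			rules, _ := cmd.Flags().GetStringArray("rule")
			path, _ := cmd.Flags().GetString("path")
			cmd.Println("rules:", rules, "path:", path)
		},
	}
	cmd.Flags().String("chain-id", "", "Rule chain ID")
	_ = cmd.MarkFlagRequired("chain-id")
	cmd.Flags().StringArray("rule", nil, "Rule chain in text format")
	cmd.Flags().String("path", "", "Path to encoded chain in JSON or binary format")
	cmd.MarkFlagsMutuallyExclusive("rule", "path") // cobra rejects setting both at once
	return cmd
}

func main() { _ = newAddRuleChainCmd().Execute() }
```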
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
index 3c332c3f0..d4aedda2e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
@@ -2,14 +2,13 @@ package ape
import (
"errors"
+ "strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -19,29 +18,90 @@ import (
"github.com/spf13/viper"
)
-var errUnknownTargetType = errors.New("unknown target type")
+const (
+ ingress = "ingress"
+ s3 = "s3"
+)
+
+var mChainName = map[string]apechain.Name{
+ ingress: apechain.Ingress,
+ s3: apechain.S3,
+}
+
+var (
+ errUnknownTargetType = errors.New("unknown target type")
+ errChainIDCannotBeEmpty = errors.New("chain id cannot be empty")
+ errRuleIsNotParsed = errors.New("rule is not passed")
+ errUnsupportedChainName = errors.New("unsupported chain name")
+)
func parseTarget(cmd *cobra.Command) policyengine.Target {
- typ := apeCmd.ParseTargetType(cmd)
- name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag)
- switch typ {
- case policyengine.Namespace:
- if name == "root" {
- name = ""
- }
- return policyengine.NamespaceTarget(name)
- case policyengine.Container:
- var cnr cid.ID
- commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
- return policyengine.ContainerTarget(name)
- case policyengine.User:
- return policyengine.UserTarget(name)
- case policyengine.Group:
- return policyengine.GroupTarget(name)
- default:
- commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
+ name, _ := cmd.Flags().GetString(targetNameFlag)
+ typ, err := parseTargetType(cmd)
+
+ // interpret "root" namespace as empty
+ if typ == policyengine.Namespace && name == "root" {
+ name = ""
}
- panic("unreachable")
+
+ commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
+
+ return policyengine.Target{
+ Name: name,
+ Type: typ,
+ }
+}
+
+func parseTargetType(cmd *cobra.Command) (policyengine.TargetType, error) {
+ typ, _ := cmd.Flags().GetString(targetTypeFlag)
+ switch typ {
+ case namespaceTarget:
+ return policyengine.Namespace, nil
+ case containerTarget:
+ return policyengine.Container, nil
+ case userTarget:
+ return policyengine.User, nil
+ case groupTarget:
+ return policyengine.Group, nil
+ }
+ return -1, errUnknownTargetType
+}
+
+func parseChainID(cmd *cobra.Command) apechain.ID {
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ if chainID == "" {
+ commonCmd.ExitOnErr(cmd, "read chain id error: %w",
+ errChainIDCannotBeEmpty)
+ }
+ return apechain.ID(chainID)
+}
+
+func parseChain(cmd *cobra.Command) *apechain.Chain {
+ chain := new(apechain.Chain)
+
+ if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules))
+ } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
+ commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath))
+ } else {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
+ }
+
+ chain.ID = parseChainID(cmd)
+
+ cmd.Println("Parsed chain:")
+ parseutil.PrintHumanReadableAPEChain(cmd, chain)
+
+ return chain
+}
+
+func parseChainName(cmd *cobra.Command) apechain.Name {
+ chainName, _ := cmd.Flags().GetString(chainNameFlag)
+ apeChainName, ok := mChainName[strings.ToLower(chainName)]
+ if !ok {
+ commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
+ }
+ return apeChainName
}
// invokerAdapter adapts invoker.Invoker to ContractStorageInvoker interface.
@@ -55,15 +115,16 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
}
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
inv := invoker.New(c, nil)
+ var ch util.Uint160
r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
- ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
+ ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
invokerAdapter := &invokerAdapter{
@@ -75,11 +136,10 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
}
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
- walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
- ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
+ ac, err := helper.NewLocalActor(cmd, c)
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
var ch util.Uint160
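
Both constructors above share the same resolution chain: build an N3 client, wrap it in an invoker, fetch the NNS contract state (contract ID 1 on the sidechain), then resolve the policy contract hash through its NNS domain. A sketch assuming the helper signatures visible in this hunk (`GetN3Client`, `GetContractByID`, `NNSResolveHash`, `DomainOf`):

```go
package example

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/spf13/viper"
)

// resolvePolicyHash walks the chain used by both constructors above.
func resolvePolicyHash() (util.Uint160, error) {
	c, err := helper.GetN3Client(viper.GetViper())
	if err != nil {
		return util.Uint160{}, fmt.Errorf("create N3 client: %w", err)
	}
	inv := invoker.New(c, nil)

	// NNS is deployed with contract ID 1 on the sidechain.
	r := management.NewReader(inv)
	nnsCs, err := helper.GetContractByID(r, 1)
	if err != nil {
		return util.Uint160{}, fmt.Errorf("get NNS contract state: %w", err)
	}

	// Resolve the policy contract hash through its NNS domain.
	return helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
}
```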
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
index 23dba14f4..5519705d4 100644
--- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
+++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -52,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
nmHash util.Uint160
)
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return err
}
@@ -162,7 +161,9 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
}
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(w.Err)
+ }
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
if err != nil {
@@ -225,7 +226,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
for i := range accounts {
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
}
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(w.Err)
+ }
res, err := c.Run(w.Bytes())
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
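
The `assert.NoError(w.Err)` to explicit-panic changes above all guard the same neo-go idiom: `BufBinWriter` latches the first emit failure in its `Err` field, so a script can be assembled with many `emit` calls and checked once at the end. A minimal sketch:

```go
package example

import (
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
)

// balanceOfScript batches one balanceOf call per account into a single
// script; a lone w.Err check at the end covers every emit above it.
func balanceOfScript(gasHash util.Uint160, accounts []util.Uint160) []byte {
	w := io.NewBufBinWriter()
	for _, acc := range accounts {
		emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, acc)
	}
	if w.Err != nil { // first failure recorded by any emit, if there was one
		panic(w.Err) // a failed emit is a programming bug, as in the hunks above
	}
	return w.Bytes()
}
```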
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go
index c17fb62ff..3a7f84acb 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go
@@ -26,7 +26,7 @@ import (
const forceConfigSet = "force"
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
nbuf := make([]byte, 8)
- copy(nbuf, v)
+ copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return helper.InvalidConfigValueErr(k)
}
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
default:
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
}
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
index be4041a86..a3b4f129a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go
+++ b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
@@ -4,6 +4,7 @@ import "time"
const (
ConsensusAccountName = "consensus"
+ ProtoConfigPath = "protocol"
// MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
// of the invocation script.
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index 79685f111..6f08d1655 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -77,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err)
}
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -158,7 +157,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
}
func listContainers(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -236,7 +235,9 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
putContainer(bw, ch, cnt)
- assert.NoError(bw.Err)
+ if bw.Err != nil {
+ panic(bw.Err)
+ }
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
index 543b5fcb3..5adb480da 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/cli/cmdargs"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@@ -121,7 +120,9 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
}
- assert.NoError(writer.Err, "can't create deployment script")
+ if writer.Err != nil {
+ panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+ }
if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
return err
@@ -172,8 +173,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
}
- assert.NoError(bw.Err, "can't create deployment script")
- if bw.Len() != start {
+ if bw.Err != nil {
+ panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+ } else if bw.Len() != start {
writer.WriteBytes(bw.Bytes())
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
index fde58fd2b..be2134b77 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
@@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -37,7 +36,7 @@ type contractDumpInfo struct {
}
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -220,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
- _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
- info.name, info.version, info.hash.StringLE()))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
+ info.name, info.version, info.hash.StringLE())))
}
_ = tw.Flush()
@@ -237,17 +236,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
- assert.NoError(sub.Err, "can't create version script")
+ if sub.Err != nil {
+ panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+ }
script := sub.Bytes()
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
- bw.WriteBytes(script)
+ bw.BinWriter.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
- assert.NoError(bw.Err, "can't create version script")
+ if bw.Err != nil {
+ panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+ }
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
deleted file mode 100644
index 4046e85e3..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package frostfsid
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- frostfsidAddSubjectKeyCmd = &cobra.Command{
- Use: "add-subject-key",
- Short: "Add a public key to the subject in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidAddSubjectKey,
- }
- frostfsidRemoveSubjectKeyCmd = &cobra.Command{
- Use: "remove-subject-key",
- Short: "Remove a public key from the subject in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidRemoveSubjectKey,
- }
-)
-
-func initFrostfsIDAddSubjectKeyCmd() {
- Cmd.AddCommand(frostfsidAddSubjectKeyCmd)
-
- ff := frostfsidAddSubjectKeyCmd.Flags()
- ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- ff.String(subjectAddressFlag, "", "Subject address")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
- ff.String(subjectKeyFlag, "", "Public key to add")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func initFrostfsIDRemoveSubjectKeyCmd() {
- Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd)
-
- ff := frostfsidRemoveSubjectKeyCmd.Flags()
- ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- ff.String(subjectAddressFlag, "", "Subject address")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
- ff.String(subjectKeyFlag, "", "Public key to remove")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) {
- addr := getFrostfsIDSubjectAddress(cmd)
- pub := getFrostfsIDSubjectKey(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "add subject key: %w", err)
-}
-
-func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) {
- addr := getFrostfsIDSubjectAddress(cmd)
- pub := getFrostfsIDSubjectKey(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "remove subject key: %w", err)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
index 7f777db98..091d6634a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
@@ -1,7 +1,6 @@
package frostfsid
import (
- "encoding/hex"
"errors"
"fmt"
"math/big"
@@ -35,16 +34,11 @@ const (
subjectNameFlag = "subject-name"
subjectKeyFlag = "subject-key"
subjectAddressFlag = "subject-address"
- extendedFlag = "extended"
+ includeNamesFlag = "include-names"
groupNameFlag = "group-name"
groupIDFlag = "group-id"
rootNamespacePlaceholder = "
-| `level` | `string` | `info` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
-| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tag descriptions. |
-
-## `tags` subsection
-| Parameter | Type | Default value | Description |
-|-----------|----------|---------------|-----------------------------------------------------------------------------------------------------|
-| `names` | `string` | | List of components divided by `,`.<br/>Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. |
-| `level` | `string` | | Logging level for the components from `names`; overrides the default logging level. |
+| Parameter | Type | Default value | Description |
+|-----------|----------|---------------|---------------------------------------------------------------------------------------------------|
+| `level` | `string` | `info` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
# `contracts` section
Contains override values for FrostFS side-chain contract hashes. Most of the time contract
@@ -159,19 +147,15 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
- netmap:
- candidates:
- poll_interval: 20s
```
-| Parameter | Type | Default value | Description |
-|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
-| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
-| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
-| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
-| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
-| `netmap.candidates.poll_interval` | `duration` | `20s` | Polling interval for merging netmap candidates into the netmap kept in the local cache. |
+| Parameter | Type | Default value | Description |
+| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
+| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
+| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
+| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
+| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@@ -185,6 +169,7 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
+| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
@@ -195,41 +180,20 @@ Contains configuration for each shard. Keys must be consecutive numbers starting
`default` subsection has the same format and specifies defaults for missing values.
The following table describes configuration for each shard.
-| Parameter | Type | Default value | Description |
-| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- |
-| `compression` | [Compression config](#compression-subsection) | | Compression config. |
-| `mode` | `string` | `read-write` | Shard Mode.<br/>Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
-| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
-| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
-| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
-| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
-| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
-| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
-| `gc` | [GC config](#gc-subsection) | | GC configuration. |
-| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
-
-### `compression` subsection
-
-Contains compression config.
-
-```yaml
-compression:
- enabled: true
- level: smallest_size
- exclude_content_types:
- - audio/*
- - video/*
- estimate_compressibility: true
- estimate_compressibility_threshold: 0.7
-```
-
-| Parameter | Type | Default value | Description |
-| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `enabled` | `bool` | `false` | Flag to enable compression. |
-| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. |
-| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
-| `estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data or not. |
-| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data is compressed if the estimate is greater than this value. |
+| Parameter | Type | Default value | Description |
+| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `compress` | `bool` | `false` | Flag to enable compression. |
+| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
+| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data or not. |
+| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data is compressed if the estimate is greater than this value. |
+| `mode` | `string` | `read-write` | Shard Mode.<br/>Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
+| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
+| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
+| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
+| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
+| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
+| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
+| `gc` | [GC config](#gc-subsection) | | GC configuration. |
### `blobstor` subsection
@@ -244,7 +208,7 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
- perm: 0o644
+ perm: 0644
size: 4194304
depth: 1
width: 4
@@ -304,7 +268,7 @@ gc:
```yaml
metabase:
path: /path/to/meta.db
- perm: 0o644
+ perm: 0644
max_batch_size: 200
max_batch_delay: 20ms
```
@@ -336,65 +300,6 @@ writecache:
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
-### `limits` subsection
-
-```yaml
-limits:
- max_read_running_ops: 10000
- max_read_waiting_ops: 1000
- max_write_running_ops: 1000
- max_write_waiting_ops: 100
- read:
- - tag: internal
- weight: 20
- limit_ops: 0
- reserved_ops: 1000
- - tag: client
- weight: 70
- reserved_ops: 10000
- - tag: background
- weight: 5
- limit_ops: 10000
- reserved_ops: 0
- - tag: writecache
- weight: 5
- limit_ops: 25000
- - tag: policer
- weight: 5
- limit_ops: 25000
- write:
- - tag: internal
- weight: 200
- limit_ops: 0
- reserved_ops: 100
- - tag: client
- weight: 700
- reserved_ops: 1000
- - tag: background
- weight: 50
- limit_ops: 1000
- reserved_ops: 0
- - tag: writecache
- weight: 50
- limit_ops: 2500
- - tag: policer
- weight: 50
- limit_ops: 2500
-```
-
-| Parameter | Type | Default value | Description |
-| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
-| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. |
-| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
-| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
-| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
-| `read` | `[]tag` | empty | Array of shard read settings for tags. |
-| `write` | `[]tag` | empty | Array of shard write settings for tags. |
-| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
-| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. |
-| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
-| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
-| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. |
# `node` section
@@ -410,22 +315,22 @@ node:
- "Price:11"
- "UN-LOCODE:RU MSK"
- "key:value"
+ relay: false
persistent_sessions:
path: /sessions
persistent_state:
path: /state
- locode_db_path: "/path/to/locode/db"
```
-| Parameter | Type | Default value | Description |
-|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------|
-| `key` | `string` | | Path to the binary-encoded private key. |
-| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
-| `addresses` | `[]string` | | Addresses advertised in the netmap. |
-| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in ` bucket
if len(lst) == 0 {
- return bkt.Delete(item.key)
+ _ = bkt.Delete(item.key) // ignore error, best effort here
+
+ return nil
}
// if list is not empty, then update it
encodedLst, err := encodeList(lst)
if err != nil {
- return err
+ return nil // ignore error, best effort here
}
- return bkt.Put(item.key, encodedLst)
+ _ = bkt.Put(item.key, encodedLst) // ignore error, best effort here
+ return nil
}
func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
@@ -478,47 +480,35 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
return ErrUnknownObjectType
}
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
key: objKey,
- }); err != nil {
- return err
- }
+ })
} else {
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: parentBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
+ })
}
- if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
+ delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
name: smallBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
- if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
+ })
+ delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
name: rootBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
+ })
if expEpoch, ok := hasExpirationEpoch(obj); ok {
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: expEpochToObjectBucketName,
key: expirationEpochKey(expEpoch, cnr, addr.Object()),
- }); err != nil {
- return err
- }
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ })
+ delUniqueIndexItem(tx, namedBucketItem{
name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
key: objKey,
- }); err != nil {
- return err
- }
+ })
}
return nil
@@ -539,18 +529,16 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
}
}
// also drop EC parent root info if current EC chunk is the last one
if !hasAnyChunks {
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
- }); err != nil {
- return err
- }
+ })
}
if ech.ParentSplitParentID() == nil {
@@ -579,15 +567,16 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
}
}
// drop split info
- return delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)),
})
+ return nil
}
func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool {
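
The index-deletion hunks above switch the `delUniqueIndexItem` call sites to a fire-and-forget style: failures while removing secondary-index entries are dropped so that object removal itself cannot fail on a stale index. A minimal sketch of that best-effort pattern against bbolt (the helper name is hypothetical):

```go
package example

import "go.etcd.io/bbolt"

// bestEffortDelete is a hypothetical distillation of the pattern above:
// missing buckets and failed deletes are not errors for index cleanup.
func bestEffortDelete(tx *bbolt.Tx, bucket, key []byte) {
	bkt := tx.Bucket(bucket)
	if bkt == nil {
		return // bucket already gone, nothing to clean up
	}
	_ = bkt.Delete(key) // ignore error, best effort
}
```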
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
index 884da23ff..a25627990 100644
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -30,8 +30,8 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
@@ -130,9 +130,17 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.Equal(t, 2, len(tombstonedObjects))
- _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
+ var tombstones []oid.Address
+ for _, tss := range tombstonedObjects {
+ tombstones = append(tombstones, tss.tomb)
+ }
+ inhumePrm.SetAddresses(tombstones...)
+ inhumePrm.SetGCMark()
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
+ require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
+
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
@@ -186,8 +194,8 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
ecChunks := make([]oid.ID, chunksCount)
@@ -366,9 +374,17 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.True(t, len(tombstonedObjects) == parentCount+chunksCount)
- _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
+ var tombstones []oid.Address
+ for _, tss := range tombstonedObjects {
+ tombstones = append(tombstones, tss.tomb)
+ }
+ inhumePrm.SetAddresses(tombstones...)
+ inhumePrm.SetGCMark()
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
+ require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
+
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
index 0329e3a73..cdfe2a203 100644
--- a/pkg/local_object_storage/metabase/delete_meta_test.go
+++ b/pkg/local_object_storage/metabase/delete_meta_test.go
@@ -23,8 +23,8 @@ func TestPutDeleteIndexAttributes(t *testing.T) {
}...)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
obj1 := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index c0762a377..fe5f7833b 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -18,7 +18,7 @@ import (
func TestDB_Delete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
parent := testutil.GenerateObjectWithCID(cnr)
@@ -65,7 +65,7 @@ func TestDB_Delete(t *testing.T) {
func TestDeleteAllChildren(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -103,7 +103,7 @@ func TestDeleteAllChildren(t *testing.T) {
func TestGraveOnlyDelete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
addr := oidtest.Address()
@@ -116,7 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) {
func TestExpiredObject(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
// removing expired object should be error-free
@@ -128,7 +128,7 @@ func TestExpiredObject(t *testing.T) {
func TestDelete(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
for range 10 {
@@ -170,7 +170,7 @@ func TestDelete(t *testing.T) {
func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 7bd6f90a6..2e1b1dce8 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -19,8 +19,8 @@ import (
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- addr oid.Address
- ecParentAddr oid.Address
+ addr oid.Address
+ paddr oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -36,9 +36,9 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) {
p.addr = addr
}
-// SetECParent is an Exists option to set objects parent.
-func (p *ExistsPrm) SetECParent(addr oid.Address) {
- p.ecParentAddr = addr
+// SetParent is an Exists option to set objects parent.
+func (p *ExistsPrm) SetParent(addr oid.Address) {
+ p.paddr = addr
}
// Exists returns the fact that the object is in the metabase.
@@ -81,7 +81,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch)
+ res.exists, res.locked, err = db.exists(tx, prm.addr, prm.paddr, currEpoch)
return err
})
@@ -89,21 +89,10 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
return res, metaerr.Wrap(err)
}
-func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) {
+func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpoch uint64) (bool, bool, error) {
var locked bool
- if !ecParent.Equals(oid.Address{}) {
- st, err := objectStatus(tx, ecParent, currEpoch)
- if err != nil {
- return false, false, err
- }
- switch st {
- case 2:
- return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
- case 3:
- return false, locked, ErrObjectIsExpired
- }
-
- locked = objectLocked(tx, ecParent.Container(), ecParent.Object())
+ if !parent.Equals(oid.Address{}) {
+ locked = objectLocked(tx, parent.Container(), parent.Object())
}
// check graveyard and object expiration first
st, err := objectStatus(tx, addr, currEpoch)
@@ -153,16 +142,12 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
- return objectStatusWithCache(nil, tx, addr, currEpoch)
-}
-
-func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
// locked object could not be removed/marked with GC/expired
- if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) {
+ if objectLocked(tx, addr.Container(), addr.Object()) {
return 0, nil
}
- expired, err := isExpiredWithCache(bc, tx, addr, currEpoch)
+ expired, err := isExpired(tx, addr, currEpoch)
if err != nil {
return 0, err
}
@@ -171,8 +156,8 @@ func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, curr
return 3, nil
}
- graveyardBkt := getGraveyardBucket(bc, tx)
- garbageBkt := getGarbageBucket(bc, tx)
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
addrKey := addressKey(addr, make([]byte, addressKeySize))
return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
}
@@ -232,7 +217,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
err := splitInfo.Unmarshal(rawSplitInfo)
if err != nil {
- return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
+ return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
}
return splitInfo, nil
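
For orientation, the status codes documented in the `objectStatus` comment above translate into caller-visible errors roughly as in this sketch; the hunk only states codes 2 (tombstoned) and 3 (expired), so the meaning given for 0 and 1 is inferred from the surrounding code, not from this diff:

```go
package example

import (
	"errors"

	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// errObjectIsExpired stands in for the metabase package's ErrObjectIsExpired.
var errObjectIsExpired = errors.New("object is expired")

// statusToError mirrors the switch this hunk removes from exists():
// tombstoned objects surface as an API-level "already removed" status,
// expired ones as a sentinel error; other codes are not errors here.
func statusToError(st uint8) error {
	switch st {
	case 2: // covered with tombstone
		return new(apistatus.ObjectAlreadyRemoved)
	case 3: // expired
		return errObjectIsExpired
	default: // 0 — available; 1 — marked as garbage (inferred, see above)
		return nil
	}
}
```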
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 3045e17f1..1e4148eba 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -1,7 +1,6 @@
package meta_test
import (
- "context"
"errors"
"testing"
@@ -19,7 +18,7 @@ const currEpoch = 1000
func TestDB_Exists(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
t.Run("no object", func(t *testing.T) {
nonExist := testutil.GenerateObject()
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
index a1351cb6f..68144d8b1 100644
--- a/pkg/local_object_storage/metabase/expired.go
+++ b/pkg/local_object_storage/metabase/expired.go
@@ -74,11 +74,9 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
}
func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- return isExpiredWithCache(nil, tx, addr, currEpoch)
-}
-
-func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- b := getExpiredBucket(bc, tx, addr.Container())
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName)
+ b := tx.Bucket(bucketName)
if b == nil {
return false, nil
}
diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go
index 495c1eee7..bb98745ee 100644
--- a/pkg/local_object_storage/metabase/expired_test.go
+++ b/pkg/local_object_storage/metabase/expired_test.go
@@ -13,7 +13,7 @@ import (
func TestDB_SelectExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
containerID1 := cidtest.ID()
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index 821810c09..776f5d27c 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -1,6 +1,7 @@
package meta
import (
+ "bytes"
"context"
"fmt"
"time"
@@ -88,12 +89,8 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
- return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
-}
-
-func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
if checkStatus {
- st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
+ st, err := objectStatus(tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -113,13 +110,12 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
bucketName := make([]byte, bucketKeySize)
// check in primary index
- if b := getPrimaryBucket(bc, tx, cnr); b != nil {
- if data := b.Get(key); len(data) != 0 {
- return obj, obj.Unmarshal(data)
- }
+ data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
+ if len(data) != 0 {
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
- data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
+ data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
if len(data) != 0 {
return nil, getECInfoError(tx, cnr, data)
}
@@ -127,13 +123,13 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
// if not found then check in tombstone index
data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
// if not found then check in locker index
data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
// if not found then check if object is a virtual
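The bytes.Clone calls reappearing in this hunk matter: bbolt's Get returns a slice into the memory-mapped database file, valid only while the transaction is open, so the header bytes must be copied before the decoded object can escape the View callback. A self-contained demonstration of the pattern:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("/tmp/clone-demo.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("primary"))
		if err != nil {
			return err
		}
		return b.Put([]byte("key"), []byte("object header bytes"))
	})
	if err != nil {
		log.Fatal(err)
	}

	var header []byte
	_ = db.View(func(tx *bbolt.Tx) error {
		// Get returns a slice into bbolt's mmap, valid only inside this tx;
		// Clone detaches it so it can be used after the tx closes.
		header = bytes.Clone(tx.Bucket([]byte("primary")).Get([]byte("key")))
		return nil
	})
	fmt.Printf("%s\n", header)
}
```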
@@ -191,7 +187,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
err = child.Unmarshal(data)
if err != nil {
- return nil, fmt.Errorf("unmarshal child with parent: %w", err)
+ return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
}
par := child.Parent()
@@ -220,10 +216,10 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error {
ecInfo := objectSDK.NewECInfo()
for _, key := range keys {
// check in primary index
- objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
- if len(objData) != 0 {
+ ojbData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
+ if len(ojbData) != 0 {
obj := objectSDK.New()
- if err := obj.Unmarshal(objData); err != nil {
+ if err := obj.Unmarshal(ojbData); err != nil {
return err
}
chunk := objectSDK.ECChunk{}
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 98c428410..f0caaea70 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -25,7 +25,7 @@ import (
func TestDB_Get(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw := testutil.GenerateObject()
@@ -219,6 +219,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
meta.WithMaxBatchSize(batchSize),
meta.WithMaxBatchDelay(10*time.Millisecond),
)
+ defer func() { require.NoError(b, db.Close()) }()
addrs := make([]oid.Address, 0, numOfObj)
for range numOfObj {
@@ -233,7 +234,6 @@ func benchmarkGet(b *testing.B, numOfObj int) {
}
db, addrs := prepareDb(runtime.NumCPU())
- defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("parallel", func(b *testing.B) {
b.ReportAllocs()
@@ -253,7 +253,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
})
})
- require.NoError(b, db.Close(context.Background()))
+ require.NoError(b, db.Close())
require.NoError(b, os.RemoveAll(b.Name()))
db, addrs = prepareDb(1)
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 2f23d424c..31f95d6ed 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -177,7 +176,7 @@ type gcHandler struct {
func (g gcHandler) handleKV(k, _ []byte) error {
o, err := garbageFromKV(k)
if err != nil {
- return fmt.Errorf("parse garbage object: %w", err)
+ return fmt.Errorf("could not parse garbage object: %w", err)
}
return g.h(o)
@@ -190,7 +189,7 @@ type graveyardHandler struct {
func (g graveyardHandler) handleKV(k, v []byte) error {
o, err := graveFromKV(k, v)
if err != nil {
- return fmt.Errorf("parse grave: %w", err)
+ return fmt.Errorf("could not parse grave: %w", err)
}
return g.h(o)
@@ -240,7 +239,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
func garbageFromKV(k []byte) (res GarbageObject, err error) {
err = decodeAddressFromKey(&res.addr, k)
if err != nil {
- err = fmt.Errorf("parse address: %w", err)
+ err = fmt.Errorf("could not parse address: %w", err)
}
return
@@ -256,58 +255,46 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) {
return
}
-// InhumeTombstones deletes tombstoned objects from the
+// DropGraves deletes tombstoned objects from the
// graveyard bucket.
//
// Returns any error that appeared during the deletion process.
-func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) {
+func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("DropGraves", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones")
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.DropGraves")
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
- return InhumeRes{}, ErrDegradedMode
+ return ErrDegradedMode
} else if db.mode.ReadOnly() {
- return InhumeRes{}, ErrReadOnlyMode
+ return ErrReadOnlyMode
}
buf := make([]byte, addressKeySize)
- prm := InhumePrm{forceRemoval: true}
- currEpoch := db.epochState.CurrentEpoch()
- var res InhumeRes
-
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
- res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)}
-
- garbageBKT := tx.Bucket(garbageBucketName)
- graveyardBKT := tx.Bucket(graveyardBucketName)
-
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
- if err != nil {
- return err
+ return db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(graveyardBucketName)
+ if bkt == nil {
+ return nil
}
- for i := range tss {
- if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil {
- return err
- }
- if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil {
+ for _, ts := range tss {
+ err := bkt.Delete(addressKey(ts.Address(), buf))
+ if err != nil {
return err
}
}
return nil
})
- return res, err
}
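A hedged caller sketch for the restored API: collect graves via graveyard iteration, then drop them in one call. The Prm/handler names follow the tests in this diff; IterateOverGraveyard's exact signature is an assumption:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// dropAllGraves sweeps every record currently in the graveyard.
func dropAllGraves(ctx context.Context, db *meta.DB) error {
	var buried []meta.TombstonedObject

	var prm meta.GraveyardIterationPrm
	prm.SetHandler(func(t meta.TombstonedObject) error {
		buried = append(buried, t)
		return nil
	})
	if err := db.IterateOverGraveyard(ctx, prm); err != nil {
		return err
	}

	// DropGraves runs inside bbolt's Batch, so concurrent calls may be
	// coalesced into a single write transaction.
	return db.DropGraves(ctx, buried)
}
```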
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index ebadecc04..b9c6ce28c 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -7,9 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -17,7 +15,7 @@ import (
func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
var counter int
var iterGravePRM meta.GraveyardIterationPrm
@@ -44,7 +42,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
func TestDB_Iterate_OffsetNotFound(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
obj1 := testutil.GenerateObject()
obj2 := testutil.GenerateObject()
@@ -115,7 +113,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
// generate and put 4 objects
@@ -204,7 +202,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
// generate and put 4 objects
@@ -305,7 +303,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
func TestDB_IterateOverGarbage_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
// generate and put 4 objects
obj1 := testutil.GenerateObject()
@@ -395,9 +393,9 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
require.False(t, iWasCalled)
}
-func TestDB_InhumeTombstones(t *testing.T) {
+func TestDB_DropGraves(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
// generate and put 2 objects
@@ -412,20 +410,9 @@ func TestDB_InhumeTombstones(t *testing.T) {
err = putBig(db, obj2)
require.NoError(t, err)
- id1, _ := obj1.ID()
- id2, _ := obj2.ID()
- ts := objectSDK.NewTombstone()
- ts.SetMembers([]oid.ID{id1, id2})
- objTs := objectSDK.New()
- objTs.SetContainerID(cnr)
- objTs.SetType(objectSDK.TypeTombstone)
-
- data, _ := ts.Marshal()
- objTs.SetPayload(data)
- require.NoError(t, objectSDK.CalculateAndSetID(objTs))
- require.NoError(t, putBig(db, objTs))
-
- addrTombstone := object.AddressOf(objTs)
+ // inhume with tombstone
+ addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
@@ -448,11 +435,8 @@ func TestDB_InhumeTombstones(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 2, counter)
- res, err := db.InhumeTombstones(context.Background(), buriedTS)
+ err = db.DropGraves(context.Background(), buriedTS)
require.NoError(t, err)
- require.EqualValues(t, 1, res.LogicInhumed())
- require.EqualValues(t, 0, res.UserInhumed())
- require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID())
counter = 0
iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error {
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 76018fb61..12f27d330 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -205,7 +205,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
success = err == nil
if success {
for _, addr := range prm.target {
- storagelog.Write(ctx, db.log,
+ storagelog.Write(db.log,
storagelog.AddressField(addr),
storagelog.OpField("metabase INHUME"))
}
@@ -217,93 +217,85 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes
garbageBKT := tx.Bucket(garbageBucketName)
graveyardBKT := tx.Bucket(graveyardBucketName)
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm)
if err != nil {
return err
}
buf := make([]byte, addressKeySize)
for i := range prm.target {
- if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil {
+ id := prm.target[i].Object()
+ cnr := prm.target[i].Container()
+
+ // prevent locked objects from being inhumed
+ if !prm.forceRemoval && objectLocked(tx, cnr, id) {
+ return new(apistatus.ObjectLocked)
+ }
+
+ var lockWasChecked bool
+
+ // prevent lock objects from being inhumed
+ // if `Inhume` was called without the
+ // `WithForceGCMark` option
+ if !prm.forceRemoval {
+ if isLockObject(tx, cnr, id) {
+ return ErrLockObjectRemoval
+ }
+
+ lockWasChecked = true
+ }
+
+ obj, err := db.get(tx, prm.target[i], buf, false, true, epoch)
+ targetKey := addressKey(prm.target[i], buf)
+ var ecErr *objectSDK.ECInfoError
+ if err == nil {
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
+ if err != nil {
+ return err
+ }
+ } else if errors.As(err, &ecErr) {
+ err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
+ if err != nil {
+ return err
+ }
+ }
+
+ if prm.tomb != nil {
+ var isTomb bool
+ isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
+ if err != nil {
+ return err
+ }
+
+ if isTomb {
+ continue
+ }
+ }
+
+ // consider checking if target is already in graveyard?
+ err = bkt.Put(targetKey, value)
+ if err != nil {
return err
}
+
+ if prm.lockObjectHandling {
+ // do not perform lock check if
+ // it was already called
+ if lockWasChecked {
+ // inhumed object is not of
+ // the LOCK type
+ continue
+ }
+
+ if isLockObject(tx, cnr, id) {
+ res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
+ }
+ }
}
return db.applyInhumeResToCounters(tx, res)
}
-func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error {
- id := addr.Object()
- cnr := addr.Container()
- tx := bkt.Tx()
-
- // prevent locked objects to be inhumed
- if !prm.forceRemoval && objectLocked(tx, cnr, id) {
- return new(apistatus.ObjectLocked)
- }
-
- var lockWasChecked bool
-
- // prevent lock objects to be inhumed
- // if `Inhume` was called not with the
- // `WithForceGCMark` option
- if !prm.forceRemoval {
- if isLockObject(tx, cnr, id) {
- return ErrLockObjectRemoval
- }
-
- lockWasChecked = true
- }
-
- obj, err := db.get(tx, addr, buf, false, true, epoch)
- targetKey := addressKey(addr, buf)
- var ecErr *objectSDK.ECInfoError
- if err == nil {
- err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
- if err != nil {
- return err
- }
- } else if errors.As(err, &ecErr) {
- err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
- if err != nil {
- return err
- }
- }
-
- if prm.tomb != nil {
- var isTomb bool
- isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
- if err != nil {
- return err
- }
-
- if isTomb {
- return nil
- }
- }
-
- // consider checking if target is already in graveyard?
- err = bkt.Put(targetKey, value)
- if err != nil {
- return err
- }
-
- if prm.lockObjectHandling {
- // do not perform lock check if
- // it was already called
- if lockWasChecked {
- // inhumed object is not of
- // the LOCK type
- return nil
- }
-
- if isLockObject(tx, cnr, id) {
- res.deletedLockObj = append(res.deletedLockObj, addr)
- }
- }
- return nil
-}
-
func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes,
garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket,
ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte,
@@ -342,10 +334,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I
}
func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
- if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil {
+ if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil {
return err
}
- if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil {
+ if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil {
return err
}
@@ -362,7 +354,7 @@ func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
// 1. tombstone address if Inhume was called with
// a Tombstone
// 2. zeroValue if Inhume was called with a GC mark
-func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
+func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
if prm.tomb != nil {
targetBucket = graveyardBKT
tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
@@ -373,7 +365,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
if data != nil {
err := targetBucket.Delete(tombKey)
if err != nil {
- return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
+ return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
}
}
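The doc comment above pins down the two inhume flavors, which the tests elsewhere in this diff exercise. A condensed sketch reusing the setter names from those tests:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// buryAndMark shows both Inhume flavors back to back.
func buryAndMark(ctx context.Context, db *meta.DB, obj, tombstone, garbage oid.Address) error {
	var prm meta.InhumePrm

	// 1. bury under a tombstone: the graveyard bucket gets the
	// tombstone address as its value
	prm.SetAddresses(obj)
	prm.SetTombstoneAddress(tombstone)
	if _, err := db.Inhume(ctx, prm); err != nil {
		return err
	}

	// 2. plain GC mark: the garbage bucket gets a zero value
	prm.SetAddresses(garbage)
	prm.SetGCMark()
	_, err := db.Inhume(ctx, prm)
	return err
}
```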
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
index 180713287..32e412c79 100644
--- a/pkg/local_object_storage/metabase/inhume_ec_test.go
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -25,8 +25,8 @@ func TestInhumeECObject(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 786d10396..277316f7b 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -17,7 +17,7 @@ import (
func TestDB_Inhume(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw := testutil.GenerateObject()
testutil.AddAttribute(raw, "foo", "bar")
@@ -37,7 +37,7 @@ func TestDB_Inhume(t *testing.T) {
func TestInhumeTombOnTomb(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
var (
err error
@@ -107,7 +107,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
func TestInhumeLocked(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
locked := oidtest.Address()
diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go
index 9cccd7dad..d44c51fb2 100644
--- a/pkg/local_object_storage/metabase/iterators.go
+++ b/pkg/local_object_storage/metabase/iterators.go
@@ -3,6 +3,7 @@ package meta
import (
"context"
"errors"
+ "fmt"
"strconv"
"time"
@@ -11,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
@@ -109,6 +111,70 @@ func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler)
return nil
}
+// IterateCoveredByTombstones iterates over all objects in the DB that are
+// covered by a tombstone whose string-encoded address is a key in tss. Locked
+// objects are not included (not to be confused with objects of type LOCK).
+//
+// If h returns ErrInterruptIterator, iteration stops and nil is returned
+// immediately; other errors from h are returned directly.
+//
+// Does not modify tss.
+func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ return db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateCoveredByTombstones(tx, tss, h)
+ })
+}
+
+func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error {
+ bktGraveyard := tx.Bucket(graveyardBucketName)
+
+ err := bktGraveyard.ForEach(func(k, v []byte) error {
+ var addr oid.Address
+ if err := decodeAddressFromKey(&addr, v); err != nil {
+ return err
+ }
+ if _, ok := tss[addr.EncodeToString()]; ok {
+ var addr oid.Address
+
+ err := decodeAddressFromKey(&addr, k)
+ if err != nil {
+ return fmt.Errorf("could not parse address of the object under tombstone: %w", err)
+ }
+
+ if objectLocked(tx, addr.Container(), addr.Object()) {
+ return nil
+ }
+
+ return h(addr)
+ }
+
+ return nil
+ })
+
+ if errors.Is(err, ErrInterruptIterator) {
+ err = nil
+ }
+
+ return err
+}
+
func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error {
var cid cid.ID
var oid oid.ID
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 4c9579965..646dc196c 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -17,7 +18,7 @@ import (
func TestDB_IterateExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const epoch = 13
@@ -66,3 +67,65 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt
return object2.AddressOf(obj)
}
+
+func TestDB_IterateCoveredByTombstones(t *testing.T) {
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
+
+ cnr := cidtest.ID()
+ ts := oidtest.Address()
+ protected1 := oidtest.Address()
+ protected2 := oidtest.Address()
+ protectedLocked := oidtest.Address()
+ garbage := oidtest.Address()
+ ts.SetContainer(cnr)
+ protected1.SetContainer(cnr)
+ protected2.SetContainer(cnr)
+ protectedLocked.SetContainer(cnr)
+
+ var prm meta.InhumePrm
+ var err error
+
+ prm.SetAddresses(protected1, protected2, protectedLocked)
+ prm.SetTombstoneAddress(ts)
+
+ _, err = db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetAddresses(garbage)
+ prm.SetGCMark()
+
+ _, err = db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+
+ var handled []oid.Address
+
+ tss := map[string]oid.Address{
+ ts.EncodeToString(): ts,
+ }
+
+ err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
+ handled = append(handled, addr)
+ return nil
+ })
+ require.NoError(t, err)
+
+ require.Len(t, handled, 3)
+ require.Contains(t, handled, protected1)
+ require.Contains(t, handled, protected2)
+ require.Contains(t, handled, protectedLocked)
+
+ err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
+ require.NoError(t, err)
+
+ handled = handled[:0]
+
+ err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
+ handled = append(handled, addr)
+ return nil
+ })
+ require.NoError(t, err)
+
+ require.Len(t, handled, 2)
+ require.NotContains(t, handled, protectedLocked)
+}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 2a0bd7f6a..b007ef0da 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -87,8 +87,7 @@ type CountAliveObjectsInContainerPrm struct {
}
// ListWithCursor lists physical objects available in metabase starting from
-// cursor. Includes objects of all types. Does not include inhumed and expired
-// objects.
+// cursor. Includes objects of all types. Does not include inhumed objects.
// Use cursor value from response for consecutive requests.
//
// Returns ErrEndOfListing if there are no more objects to return or count
@@ -139,12 +138,11 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
var containerID cid.ID
var offset []byte
- bc := newBucketCache()
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
rawAddr := make([]byte, cidSize, addressKeySize)
- currEpoch := db.epochState.CurrentEpoch()
-
loop:
for ; name != nil; name, _ = c.Next() {
cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name)
@@ -168,8 +166,8 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
- result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
- result, count, cursor, threshold, currEpoch)
+ result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
+ result, count, cursor, threshold)
if err != nil {
return nil, nil, err
}
@@ -187,7 +185,8 @@ loop:
if offset != nil {
// new slice is much faster but less memory efficient
// we need to copy, because offset exists during bbolt tx
- cursor.inBucketOffset = bytes.Clone(offset)
+ cursor.inBucketOffset = make([]byte, len(offset))
+ copy(cursor.inBucketOffset, offset)
}
if len(result) == 0 {
@@ -196,29 +195,29 @@ loop:
// new slice is much faster but less memory efficient
// we need to copy, because bucketName exists during bbolt tx
- cursor.bucketName = bytes.Clone(bucketName)
+ cursor.bucketName = make([]byte, len(bucketName))
+ copy(cursor.bucketName, bucketName)
return result, cursor, nil
}
// selectNFromBucket is similar to selectAllFromBucket but uses a cursor to find
// the object to start selecting from. Ignores inhumed objects.
-func selectNFromBucket(
- bc *bucketCache,
- bkt *bbolt.Bucket, // main bucket
+func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
+ graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
to []objectcore.Info, // listing result
limit int, // stop listing at `limit` items in result
cursor *Cursor, // start from cursor object
threshold bool, // ignore cursor and start immediately
- currEpoch uint64,
) ([]objectcore.Info, []byte, *Cursor, error) {
if cursor == nil {
cursor = new(Cursor)
}
+ count := len(to)
c := bkt.Cursor()
k, v := c.First()
@@ -230,7 +229,7 @@ func selectNFromBucket(
}
for ; k != nil; k, v = c.Next() {
- if len(to) >= limit {
+ if count >= limit {
break
}
@@ -240,25 +239,17 @@ func selectNFromBucket(
}
offset = k
- graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
- garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
- var o objectSDK.Object
- if err := o.Unmarshal(v); err != nil {
- return nil, nil, nil, err
- }
-
- expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
- if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
- continue
- }
-
var isLinkingObj bool
var ecInfo *objectcore.ECInfo
if objType == objectSDK.TypeRegular {
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return nil, nil, nil, err
+ }
isLinkingObj = isLinkObject(&o)
ecHeader := o.ECHeader()
if ecHeader != nil {
@@ -274,6 +265,7 @@ func selectNFromBucket(
a.SetContainer(cnt)
a.SetObject(obj)
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
+ count++
}
return to, offset, cursor, nil
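For reference, the cursor contract that selectNFromBucket serves: callers page through the metabase until ErrEndOfListing, as the benchmark above does. A sketch of that loop; the ListPrm/ListRes accessor names are assumptions based on this package's API:

```go
package example

import (
	"context"
	"errors"

	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// listAll drains the metabase page by page using the listing cursor.
func listAll(ctx context.Context, db *meta.DB, pageSize uint32) ([]objectcore.Info, error) {
	var prm meta.ListPrm
	prm.SetCount(pageSize)

	var all []objectcore.Info
	for {
		res, err := db.ListWithCursor(ctx, prm)
		if err != nil {
			if errors.Is(err, meta.ErrEndOfListing) {
				return all, nil
			}
			return nil, err
		}
		all = append(all, res.AddressList()...)
		prm.SetCursor(res.Cursor()) // resume where the last page stopped
	}
}
```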
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index 02985991c..203802ec0 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -3,17 +3,14 @@ package meta_test
import (
"context"
"errors"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
@@ -21,8 +18,6 @@ import (
func BenchmarkListWithCursor(b *testing.B) {
db := listWithCursorPrepareDB(b)
- defer func() { require.NoError(b, db.Close(context.Background())) }()
-
b.Run("1 item", func(b *testing.B) {
benchmarkListWithCursor(b, db, 1)
})
@@ -38,6 +33,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
NoSync: true,
})) // faster single-thread generation
+ defer func() { require.NoError(b, db.Close()) }()
obj := testutil.GenerateObject()
for i := range 100_000 { // should be a multiple of all batch sizes
@@ -59,7 +55,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
- if !errors.Is(err, meta.ErrEndOfListing) {
+ if err != meta.ErrEndOfListing {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)
@@ -74,15 +70,13 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
func TestLisObjectsWithCursor(t *testing.T) {
t.Parallel()
- const (
- currEpoch = 100
- expEpoch = currEpoch - 1
- containers = 5
- total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
- )
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
- db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ const (
+ containers = 5
+ total = containers * 4 // regular + ts + child + lock
+ )
expected := make([]object.Info, 0, total)
@@ -132,26 +126,6 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, child)
require.NoError(t, err)
expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
-
- // add expired object (do not include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
- require.NoError(t, metaPut(db, obj, nil))
-
- // add non-expired object (include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
- require.NoError(t, metaPut(db, obj, nil))
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
-
- // add locked expired object (include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- objID := oidtest.ID()
- obj.SetID(objID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
- require.NoError(t, metaPut(db, obj, nil))
- require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
}
t.Run("success with various count", func(t *testing.T) {
@@ -189,7 +163,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const total = 5
@@ -251,7 +225,7 @@ func TestIterateOver(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const total uint64 = 5
for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index f4cb9e53b..6b78ef392 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -4,10 +4,8 @@ import (
"bytes"
"context"
"fmt"
- "slices"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -64,7 +62,9 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.
return ErrReadOnlyMode
}
- assert.False(len(locked) == 0, "empty locked list")
+ if len(locked) == 0 {
+ panic("empty locked list")
+ }
err := db.lockInternal(locked, cnr, locker)
success = err == nil
@@ -162,11 +162,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
// checks if specified object is locked in the specified container.
func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- return objectLockedWithCache(nil, tx, idCnr, idObj)
-}
-
-func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- bucketLocked := getLockedBucket(bc, tx)
+ bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
key := make([]byte, cidSize)
idCnr.Encode(key)
@@ -180,7 +176,7 @@ func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oi
}
// returns `LOCK` IDs if the specified object is locked in the specified container.
-func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
+func getLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
var lockers []oid.ID
bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
@@ -254,7 +250,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
unlockedObjects = append(unlockedObjects, addr)
} else {
// exclude locker
- keyLockers = slices.Delete(keyLockers, i, i+1)
+ keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
v, err = encodeList(keyLockers)
if err != nil {
@@ -355,20 +351,20 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e
return res, err
}
-// GetLocks return `LOCK` id's if provided object is locked by any `LOCK`. Not found
+// GetLocked returns `LOCK` IDs if the provided object is locked by any `LOCK`. A not-found
// object is considered non-locked.
//
// Returns only non-logical errors related to underlying database.
-func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
+func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("GetLocked", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks",
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocked",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
@@ -381,7 +377,7 @@ func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err
return res, ErrDegradedMode
}
err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res, err = getLocks(tx, addr.Container(), addr.Object())
+ res, err = getLocked(tx, addr.Container(), addr.Object())
return nil
}))
success = err == nil
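Usage sketch for the renamed method, whose signature is shown in the hunk above; an empty result simply means nothing currently locks the address (a not-found object counts as non-locked):

```go
package example

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// printLockers lists the LOCK objects holding addr, if any.
func printLockers(ctx context.Context, db *meta.DB, addr oid.Address) error {
	lockers, err := db.GetLocked(ctx, addr)
	if err != nil {
		return err
	}
	for _, id := range lockers {
		fmt.Println("locked by", id.EncodeToString())
	}
	return nil
}
```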
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 341ff9ad1..9601cb2be 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -21,7 +21,7 @@ func TestDB_Lock(t *testing.T) {
cnr := cidtest.ID()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
t.Run("empty locked list", func(t *testing.T) {
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
@@ -187,7 +187,7 @@ func TestDB_Lock_Expired(t *testing.T) {
es := &epochState{e: 123}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
// put an object
addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)
@@ -209,7 +209,7 @@ func TestDB_IsLocked(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
// existing and locked objs
diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go
index 7edb96384..2032ed6b2 100644
--- a/pkg/local_object_storage/metabase/mode.go
+++ b/pkg/local_object_storage/metabase/mode.go
@@ -1,7 +1,6 @@
package meta
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -9,7 +8,7 @@ import (
// SetMode sets the metabase mode of operation.
// If the mode assumes no operation metabase, the database is closed.
-func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
+func (db *DB) SetMode(m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
@@ -18,20 +17,20 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
}
if !db.mode.NoMetabase() {
- if err := db.Close(ctx); err != nil {
- return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
if m.NoMetabase() {
db.boltDB = nil
} else {
- err := db.openDB(ctx, m)
+ err := db.openDB(m)
if err == nil && !m.ReadOnly() {
- err = db.Init(ctx)
+ err = db.Init()
}
if err != nil {
- return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
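To restate the contract above in caller terms: leaving a metabase-backed mode closes bbolt, and entering one reopens it and, unless the mode is read-only, re-runs Init. A small sketch using the mode constants from the tests below:

```go
package example

import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)

// toggleDegraded flips the metabase between degraded and read-write modes.
func toggleDegraded(db *meta.DB, degraded bool) error {
	if degraded {
		return db.SetMode(mode.Degraded) // closes the underlying boltDB
	}
	return db.SetMode(mode.ReadWrite) // reopens it and runs Init
}
```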
diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go
index 28b42283f..1b9f60055 100644
--- a/pkg/local_object_storage/metabase/mode_test.go
+++ b/pkg/local_object_storage/metabase/mode_test.go
@@ -25,13 +25,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init(context.Background()))
+ require.NoError(t, bdb.Init())
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close(context.Background()))
+ require.NoError(t, bdb.Close())
require.NoError(t, bdb.Open(context.Background(), mode.Degraded))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init(context.Background()))
+ require.NoError(t, bdb.Init())
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close(context.Background()))
+ require.NoError(t, bdb.Close())
}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 5e1bbfe9e..09c5e04ad 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -100,7 +100,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
})
if err == nil {
success = true
- storagelog.Write(ctx, db.log,
+ storagelog.Write(db.log,
storagelog.AddressField(objectCore.AddressOf(prm.obj)),
storagelog.OpField("metabase PUT"))
}
@@ -120,15 +120,9 @@ func (db *DB) put(tx *bbolt.Tx,
return PutRes{}, errors.New("missing container in object")
}
- var ecParentAddress oid.Address
- if ecHeader := obj.ECHeader(); ecHeader != nil {
- ecParentAddress.SetContainer(cnr)
- ecParentAddress.SetObject(ecHeader.Parent())
- }
-
isParent := si != nil
- exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch)
+ exists, _, err := db.exists(tx, objectCore.AddressOf(obj), oid.Address{}, currEpoch)
var splitInfoError *objectSDK.SplitInfoError
if errors.As(err, &splitInfoError) {
@@ -179,18 +173,18 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
- return fmt.Errorf("put unique indexes: %w", err)
+ return fmt.Errorf("can't put unique indexes: %w", err)
}
err = updateListIndexes(tx, obj, putListIndexItem)
if err != nil {
- return fmt.Errorf("put list indexes: %w", err)
+ return fmt.Errorf("can't put list indexes: %w", err)
}
if indexAttributes {
err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
if err != nil {
- return fmt.Errorf("put fake bucket tree indexes: %w", err)
+ return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
}
}
@@ -249,7 +243,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad
}
rawObject, err := obj.CutPayload().Marshal()
if err != nil {
- return fmt.Errorf("marshal object header: %w", err)
+ return fmt.Errorf("can't marshal object header: %w", err)
}
return putUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
@@ -474,7 +468,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck
func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
}
data, err := update(bkt.Get(item.key), item.val)
@@ -491,12 +485,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
}
fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
if err != nil {
- return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
+ return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
}
return fkbtRoot.Put(item.val, zeroValue)
@@ -505,19 +499,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
}
lst, err := decodeList(bkt.Get(item.key))
if err != nil {
- return fmt.Errorf("decode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
}
lst = append(lst, item.val)
encodedLst, err := encodeList(lst)
if err != nil {
- return fmt.Errorf("encode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
}
return bkt.Put(item.key, encodedLst)
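putListIndexItem is a read-modify-write over a single bucket key: decode the stored list, append the new leaf, re-encode, put it back. A self-contained toy of the same pattern; JSON stands in for the package's private encodeList/decodeList purely for the demo:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

// appendToListIndex mimics putListIndexItem with a JSON-encoded list.
func appendToListIndex(db *bbolt.DB, bucket, key, val []byte) error {
	return db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		var lst [][]byte
		if raw := b.Get(key); raw != nil {
			if err := json.Unmarshal(raw, &lst); err != nil {
				return fmt.Errorf("can't decode leaf list: %w", err)
			}
		}
		lst = append(lst, val)
		raw, err := json.Marshal(lst)
		if err != nil {
			return fmt.Errorf("can't encode leaf list: %w", err)
		}
		return b.Put(key, raw)
	})
}

func main() {
	db, err := bbolt.Open("/tmp/list-index-demo.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	for _, v := range []string{"a", "b"} {
		if err := appendToListIndex(db, []byte("payloadHash"), []byte("h1"), []byte(v)); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("appended two leaves under one key")
}
```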
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index f37ed4cf2..914f5ef06 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -46,7 +46,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(runtime.NumCPU()))
- defer func() { require.NoError(b, db.Close(context.Background())) }()
+ defer func() { require.NoError(b, db.Close()) }()
// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
b.SetParallelism(1)
@@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(1))
- defer func() { require.NoError(b, db.Close(context.Background())) }()
+ defer func() { require.NoError(b, db.Close()) }()
var index atomic.Int64
index.Store(-1)
objs := prepareObjects(b.N)
@@ -84,7 +84,7 @@ func BenchmarkPut(b *testing.B) {
func TestDB_PutBlobovniczaUpdate(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw1 := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
index 5f0956f0b..993079dce 100644
--- a/pkg/local_object_storage/metabase/reset_test.go
+++ b/pkg/local_object_storage/metabase/reset_test.go
@@ -30,14 +30,14 @@ func TestResetDropsContainerBuckets(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
for idx := range 100 {
var putPrm PutPrm
putPrm.SetObject(testutil.GenerateObject())
- putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
+ putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
_, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 60da50671..f802036be 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -131,7 +131,6 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
res := make([]oid.Address, 0, len(mAddr))
- bc := newBucketCache()
for a, ind := range mAddr {
if ind != expLen {
continue // ignore objects with unmatched fast filters
@@ -146,7 +145,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(id)
- st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
+ st, err := objectStatus(tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -154,7 +153,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
continue // ignore removed objects
}
- addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
+ addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
if !match {
continue // ignore objects with unmatched slow filters
}
@@ -452,13 +451,13 @@ func (db *DB) selectObjectID(
}
// matchSlowFilters return true if object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
result := addr
if len(f) == 0 {
return result, true
}
- obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
+ obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch)
if err != nil {
return result, false
}
@@ -516,9 +515,9 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address,
return result, true
}
-func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
+func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
buf := make([]byte, addressKeySize)
- obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
+ obj, err := db.get(tx, addr, buf, true, false, currEpoch)
if err != nil {
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
@@ -528,7 +527,7 @@ func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Ad
continue
}
addr.SetObject(objID)
- obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
+ obj, err = db.get(tx, addr, buf, true, false, currEpoch)
if err == nil {
return obj, true, nil
}
@@ -566,7 +565,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
case v2object.FilterHeaderContainerID: // support deprecated field
err := res.cnr.DecodeString(filters[i].Value())
if err != nil {
- return filterGroup{}, fmt.Errorf("parse container id: %w", err)
+ return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
}
res.withCnrFilter = true
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index ce2156d2e..6f48607be 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -38,7 +38,7 @@ func testSelectUserAttributes(t *testing.T, index bool) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -200,7 +200,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -354,7 +354,7 @@ func TestDB_SelectInhume(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -385,7 +385,7 @@ func TestDB_SelectPayloadHash(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -456,7 +456,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -564,7 +564,7 @@ func TestDB_SelectObjectID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -680,7 +680,7 @@ func TestDB_SelectOwnerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -786,7 +786,7 @@ func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
ecChunk1 := oidtest.ID()
@@ -865,7 +865,7 @@ func TestDB_RawHead_SplitInfo(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -906,7 +906,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
t.Run("first last, then linking", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
require.NoError(t, metaPut(db, lastPart, nil))
require.NoError(t, metaPut(db, linking, nil))
@@ -930,7 +930,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("first linking, then last", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
require.NoError(t, metaPut(db, linking, nil))
require.NoError(t, metaPut(db, lastPart, nil))
@@ -954,7 +954,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("only last part", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
require.NoError(t, metaPut(db, lastPart, nil))
@@ -984,7 +984,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -1052,7 +1052,7 @@ func TestDB_SelectSplitID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -1109,7 +1109,7 @@ func TestDB_SelectContainerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -1157,7 +1157,7 @@ func TestDB_SelectContainerID(t *testing.T) {
func BenchmarkSelect(b *testing.B) {
const objCount = 1000
db := newDB(b)
- defer func() { require.NoError(b, db.Close(context.Background())) }()
+ defer func() { require.NoError(b, db.Close()) }()
cid := cidtest.ID()
@@ -1199,7 +1199,7 @@ func TestExpiredObjects(t *testing.T) {
t.Parallel()
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
cidExp, _ := exp.ContainerID()
@@ -1216,8 +1216,6 @@ func TestExpiredObjects(t *testing.T) {
}
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) {
- b.ReportAllocs()
-
var prm meta.SelectPrm
prm.SetContainerID(cid)
prm.SetFilters(fs)
diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go
index 72618b1a0..88446494e 100644
--- a/pkg/local_object_storage/metabase/shard_id.go
+++ b/pkg/local_object_storage/metabase/shard_id.go
@@ -2,7 +2,6 @@ package meta
import (
"bytes"
- "context"
"errors"
"fmt"
"os"
@@ -22,7 +21,7 @@ var (
// If id is missing, returns nil, nil.
//
// GetShardID does not report any metrics.
-func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) {
+func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -31,14 +30,14 @@ func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error
return nil, nil
}
- if err := db.openDB(ctx, mode); err != nil {
- return nil, fmt.Errorf("open metabase: %w", err)
+ if err := db.openDB(mode); err != nil {
+ return nil, fmt.Errorf("failed to open metabase: %w", err)
}
id, err := db.readShardID()
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
}
return id, metaerr.Wrap(err)
@@ -60,7 +59,7 @@ func (db *DB) readShardID() ([]byte, error) {
// SetShardID sets metabase operation mode
// and writes shard id to db.
-func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error {
+func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -69,8 +68,8 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
return ErrReadOnlyMode
}
- if err := db.openDB(ctx, mode); err != nil {
- return fmt.Errorf("open metabase: %w", err)
+ if err := db.openDB(mode); err != nil {
+ return fmt.Errorf("failed to open metabase: %w", err)
}
err := db.writeShardID(id)
@@ -79,7 +78,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
}
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
}
return metaerr.Wrap(err)
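
Reviewer note: both GetShardID and SetShardID above share the same open–act–close shape, where a close failure must not mask the primary error, hence errors.Join. A minimal self-contained Go sketch of that pattern (os.File stands in for the metabase; all names here are illustrative, not the actual API):

package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// readID opens a resource, reads from it, and always closes it,
// joining any close error with the read error so neither is lost.
func readID(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open: %w", err)
	}

	buf := make([]byte, 32)
	n, err := f.Read(buf)
	if errors.Is(err, io.EOF) {
		err = nil // an empty resource simply has no ID
	}

	if cErr := f.Close(); cErr != nil {
		err = errors.Join(err, fmt.Errorf("failed to close: %w", cErr))
	}
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}

func main() {
	id, err := readID(os.Args[0])
	fmt.Println(len(id), err)
}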
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
index 8f2376503..6d620b41a 100644
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ b/pkg/local_object_storage/metabase/storage_id.go
@@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns the storage descriptor for objects from the blobstor.
// It is stored together with the object and can make get/delete operations faster.
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) {
+func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
var (
startedAt = time.Now()
success = false
@@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, er
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
- var res StorageIDRes
if db.mode.NoMetabase() {
return res, ErrDegradedMode
}
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- res.id = db.storageID(tx, prm.addr)
- return nil
+ err = db.boltDB.View(func(tx *bbolt.Tx) error {
+ res.id, err = db.storageID(tx, prm.addr)
+
+ return err
})
success = err == nil
return res, metaerr.Wrap(err)
}
-func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte {
+func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) {
key := make([]byte, bucketKeySize)
smallBucket := tx.Bucket(smallBucketName(addr.Container(), key))
if smallBucket == nil {
- return nil
+ return nil, nil
}
storageID := smallBucket.Get(objectKey(addr.Object(), key))
if storageID == nil {
- return nil
+ return nil, nil
}
- return bytes.Clone(storageID)
+ return bytes.Clone(storageID), nil
}
// UpdateStorageIDPrm groups the parameters of UpdateStorageID operation.
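
Reviewer note: the bytes.Clone above is load-bearing — bbolt's Get returns a slice aliasing the memory-mapped page, valid only while the transaction is open. A hedged sketch of the same read-and-copy convention against a plain bbolt bucket (bucket and key names are made up; "missing" is reported as nil, nil, mirroring storageID):

package main

import (
	"bytes"
	"log"

	"go.etcd.io/bbolt"
)

// readValue copies a value out of a bbolt read transaction. The slice
// returned by Get aliases the mmap'ed page, so it must be cloned before
// the View callback returns.
func readValue(db *bbolt.DB, bucket, key []byte) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return nil // missing bucket: report "no value", not an error
		}
		v := b.Get(key)
		if v == nil {
			return nil // missing key: same convention as above
		}
		out = bytes.Clone(v) // safe to use after the transaction ends
		return nil
	})
	if err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	db, err := bbolt.Open("example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if _, err := readValue(db, []byte("small"), []byte("obj")); err != nil {
		log.Fatal(err)
	}
}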
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index fef680159..a86e42bd2 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -15,7 +15,7 @@ func TestDB_StorageID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw1 := testutil.GenerateObject()
raw2 := testutil.GenerateObject()
@@ -79,7 +79,7 @@ func TestPutWritecacheDataRace(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
putStorageID := []byte{1, 2, 3}
wcStorageID := []byte{1, 2, 3, 4, 5}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index 4948f3424..bcf72f440 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -95,7 +95,7 @@ func compactDB(db *bbolt.DB) error {
NoSync: true,
})
if err != nil {
- return fmt.Errorf("open new metabase to compact: %w", err)
+ return fmt.Errorf("can't open new metabase to compact: %w", err)
}
if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
@@ -292,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
}
expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
if err != nil {
- return fmt.Errorf("parse expiration epoch: %w", err)
+ return fmt.Errorf("could not parse expiration epoch: %w", err)
}
expirationEpochBucket := b.Bucket(attrValue)
attrKeyValueC := expirationEpochBucket.Cursor()
@@ -360,7 +360,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
return nil
}
last = keys[len(keys)-1]
- cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys)
+ cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys)
if err != nil {
log("deleting user attribute buckets completed with an error:", err)
return err
@@ -376,8 +376,8 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
}
}
-func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
- keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs)
+func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
+ keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
if err != nil {
return 0, fmt.Errorf("select non indexed user attributes: %w", err)
}
@@ -394,12 +394,12 @@ func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs co
return uint64(len(keysToDrop)), nil
}
-func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
+func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
var keysToDrop [][]byte
for _, key := range keys {
attr, ok := attributeFromAttributeBucket(key)
if !ok {
- return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+ return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
}
if !IsAtrributeIndexed(attr) {
keysToDrop = append(keysToDrop, key)
@@ -407,9 +407,9 @@ func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs contai
}
contID, ok := cidFromAttributeBucket(key)
if !ok {
- return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+ return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
}
- info, err := cs.Info(ctx, contID)
+ info, err := cs.Info(contID)
if err != nil {
return nil, err
}
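
Reviewer note: dropUserAttributes (context lines above) pages through bucket names using the last processed key as a cursor and deletes one batch per pass. A standalone sketch of that delete-in-pages idea over bbolt top-level buckets (an assumed shape for illustration, not the actual upgrade code):

package main

import (
	"bytes"

	"go.etcd.io/bbolt"
)

// dropBuckets deletes top-level buckets whose name satisfies drop,
// batchSize names per write transaction, resuming after the last
// processed name — the same cursor idea as dropUserAttributes.
func dropBuckets(db *bbolt.DB, drop func([]byte) bool, batchSize int) error {
	var last []byte
	for {
		var batch [][]byte
		err := db.View(func(tx *bbolt.Tx) error {
			c := tx.Cursor()
			k, _ := c.First()
			if last != nil {
				k, _ = c.Seek(last)
				if bytes.Equal(k, last) {
					k, _ = c.Next()
				}
			}
			for ; k != nil && len(batch) < batchSize; k, _ = c.Next() {
				batch = append(batch, bytes.Clone(k)) // keys alias tx memory
			}
			return nil
		})
		if err != nil {
			return err
		}
		if len(batch) == 0 {
			return nil // no more keys to examine
		}
		last = batch[len(batch)-1]
		err = db.Update(func(tx *bbolt.Tx) error {
			for _, name := range batch {
				if !drop(name) {
					continue
				}
				if err := tx.DeleteBucket(name); err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
}

func main() {
	db, err := bbolt.Open("example.db", 0o600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	_ = dropBuckets(db, func(name []byte) bool { return bytes.HasPrefix(name, []byte("attr_")) }, 100)
}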
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index c90de4dd6..aeb14aeb6 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -34,18 +34,18 @@ func TestUpgradeV2ToV3(t *testing.T) {
}()
db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion)
- require.NoError(t, db.Close(context.Background()))
+ require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
+ require.NoError(t, db.Close())
require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
fmt.Println()
}
type testContainerInfoProvider struct{}
-func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) {
+func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) {
return container.Info{}, nil
}
@@ -87,7 +87,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
db.boltDB.AllocSize = allocSize
db.boltDB.NoSync = true
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
containers := make([]cid.ID, containersCount)
for i := range containers {
containers[i] = cidtest.ID()
@@ -113,7 +113,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects generated")
+ db.log.Info("simple objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// complex objects
@@ -137,7 +137,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "complex objects generated")
+ db.log.Info("complex objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects deleted by gc marks
@@ -159,7 +159,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects deleted by gc marks generated")
+ db.log.Info("simple objects deleted by gc marks generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(10000)
// simple objects deleted by tombstones
@@ -189,7 +189,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects deleted by tombstones generated")
+ db.log.Info("simple objects deleted by tombstones generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects locked by locks
@@ -216,7 +216,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects locked by locks generated")
+ db.log.Info("simple objects locked by locks generated")
require.NoError(t, db.boltDB.Sync())
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 4ad83332b..0a2f91a47 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -232,11 +231,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
epoch := binary.BigEndian.Uint64(key)
var cnr cid.ID
if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
}
var obj oid.ID
if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
}
return epoch, cnr, obj, nil
}
@@ -279,7 +278,9 @@ func objectKey(obj oid.ID, key []byte) []byte {
//
// Calling firstIrregularObjectType(tx, cnr, obj) allows getting the object type.
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type {
- assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType")
+ if len(objs) == 0 {
+ panic("empty object list in firstIrregularObjectType")
+ }
var keys [2][1 + cidSize]byte
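
Reviewer note: the parseExpirationEpochKey hunk above decodes a fixed-layout key of big-endian epoch, container ID, and object ID. A self-contained Go sketch of that layout (sizes assumed to be 8 + 32 + 32 bytes, matching epochSize and cidSize here; the ID types are simplified to byte arrays):

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	epochSize = 8
	cidSize   = 32
	oidSize   = 32
)

// makeKey lays out epoch|cid|oid; the big-endian epoch keeps keys
// byte-sortable by epoch, which bbolt cursor iteration relies on.
func makeKey(epoch uint64, cnr, obj [32]byte) []byte {
	key := make([]byte, 0, epochSize+cidSize+oidSize)
	key = binary.BigEndian.AppendUint64(key, epoch)
	key = append(key, cnr[:]...)
	key = append(key, obj[:]...)
	return key
}

func parseKey(key []byte) (uint64, [32]byte, [32]byte, error) {
	var cnr, obj [32]byte
	if len(key) != epochSize+cidSize+oidSize {
		return 0, cnr, obj, fmt.Errorf("unexpected key length %d", len(key))
	}
	epoch := binary.BigEndian.Uint64(key)
	copy(cnr[:], key[epochSize:epochSize+cidSize])
	copy(obj[:], key[epochSize+cidSize:])
	return epoch, cnr, obj, nil
}

func main() {
	var c, o [32]byte
	c[0], o[0] = 1, 2
	k := makeKey(1000, c, o)
	e, _, _, err := parseKey(k)
	fmt.Println(e, err) // 1000 <nil>
}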
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index fbc0f1ad9..048bb9af6 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
- return fmt.Errorf("create auxiliary bucket: %w", err)
+ return fmt.Errorf("can't create auxiliary bucket: %w", err)
}
return b.Put(versionKey, data)
}
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index b373fb32e..75229a1b4 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -45,25 +45,25 @@ func TestVersion(t *testing.T) {
t.Run("simple", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
t.Run("reopen", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
})
t.Run("old data", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite))
+ require.NoError(t, db.SetShardID([]byte{1, 2, 3, 4}, mode.ReadWrite))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
t.Run("invalid version", func(t *testing.T) {
db := newDB(t)
@@ -71,37 +71,37 @@ func TestVersion(t *testing.T) {
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return updateVersion(tx, version+1)
}))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.Error(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.Error(t, db.Init())
+ require.NoError(t, db.Close())
t.Run("reset", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Reset())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
})
t.Run("incompleted upgrade", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
}))
- require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade)
- require.NoError(t, db.Close(context.Background()))
+ require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close())
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
}))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
})
}
diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go
index 3156751f2..22b951a41 100644
--- a/pkg/local_object_storage/pilorama/bench_test.go
+++ b/pkg/local_object_storage/pilorama/bench_test.go
@@ -28,8 +28,8 @@ func BenchmarkCreate(b *testing.B) {
WithPath(filepath.Join(tmpDir, "test.db")),
WithMaxBatchSize(runtime.GOMAXPROCS(0)))
require.NoError(b, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(b, f.Init(context.Background()))
- defer func() { require.NoError(b, f.Close(context.Background())) }()
+ require.NoError(b, f.Init())
+ defer func() { require.NoError(b, f.Close()) }()
b.Cleanup(func() {
require.NoError(b, os.RemoveAll(tmpDir))
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 897b37ea0..7bce1f340 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -91,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage {
return &b
}
-func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
+func (t *boltForest) SetMode(m mode.Mode) error {
t.modeMtx.Lock()
defer t.modeMtx.Unlock()
@@ -99,14 +99,14 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
return nil
}
- err := t.Close(ctx)
+ err := t.Close()
if err == nil && !m.NoMetabase() {
if err = t.openBolt(m); err == nil {
- err = t.Init(ctx)
+ err = t.Init()
}
}
if err != nil {
- return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+ return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
}
t.mode = m
@@ -128,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
readOnly := m.ReadOnly()
err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
+ return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
}
opts := *bbolt.DefaultOptions
@@ -139,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
t.db, err = bbolt.Open(t.path, t.perm, &opts)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
+ return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
}
t.db.MaxBatchSize = t.maxBatchSize
@@ -148,7 +148,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
return nil
}
-func (t *boltForest) Init(context.Context) error {
+func (t *boltForest) Init() error {
if t.mode.NoMetabase() || t.db.IsReadOnly() {
return nil
}
@@ -162,7 +162,7 @@ func (t *boltForest) Init(context.Context) error {
})
}
-func (t *boltForest) Close(context.Context) error {
+func (t *boltForest) Close() error {
var err error
if t.db != nil {
err = t.db.Close()
@@ -419,7 +419,10 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri
return err
}
- i, node := t.getPathPrefix(bTree, attr, path)
+ i, node, err := t.getPathPrefix(bTree, attr, path)
+ if err != nil {
+ return err
+ }
ts := t.getLatestTimestamp(bLog, d.Position, d.Size)
lm = make([]Move, len(path)-i+1)
@@ -555,80 +558,6 @@ func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string
return metaerr.Wrap(err)
}
-func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- m, err := t.filterSeen(cnr, treeID, m)
- if err != nil {
- return err
- }
- if len(m) == 0 {
- success = true
- return nil
- }
-
- ch := make(chan error)
- b := &batch{
- forest: t,
- cid: cnr,
- treeID: treeID,
- results: []chan<- error{ch},
- operations: m,
- }
- go func() {
- b.run()
- }()
- err = <-ch
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) {
- t.modeMtx.RLock()
- defer t.modeMtx.RUnlock()
-
- if t.mode.NoMetabase() {
- return nil, ErrDegradedMode
- }
-
- ops := make([]*Move, 0, len(m))
- err := t.db.View(func(tx *bbolt.Tx) error {
- treeRoot := tx.Bucket(bucketName(cnr, treeID))
- if treeRoot == nil {
- ops = m
- return nil
- }
- b := treeRoot.Bucket(logBucket)
- for _, op := range m {
- var logKey [8]byte
- binary.BigEndian.PutUint64(logKey[:], op.Time)
- seen := b.Get(logKey[:]) != nil
- if !seen {
- ops = append(ops, op)
- }
- }
- return nil
- })
- if err != nil {
- return nil, metaerr.Wrap(err)
- }
- return ops, nil
-}
-
// TreeApplyStream should be used with caution: this method locks other write transactions until `source` is closed.
func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error {
var (
@@ -977,7 +906,10 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st
b := treeRoot.Bucket(dataBucket)
- i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
+ i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
+ if err != nil {
+ return err
+ }
if i < len(path)-1 {
return nil
}
@@ -1077,7 +1009,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol
}
// TreeSortedByFilename implements the Forest interface.
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
+func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) {
var (
startedAt = time.Now()
success = false
@@ -1155,7 +1087,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
}
if len(res) != 0 {
s := string(findAttr(res[len(res)-1].Meta, AttributeFilename))
- last = NewCursor(s, res[len(res)-1].LastChild())
+ last = &s
}
return res, last, metaerr.Wrap(err)
}
@@ -1166,10 +1098,10 @@ func sortByFilename(nodes []NodeInfo) {
})
}
-func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo {
+func sortAndCut(result []NodeInfo, last *string) []NodeInfo {
var lastBytes []byte
if last != nil {
- lastBytes = []byte(last.GetFilename())
+ lastBytes = []byte(*last)
}
sortByFilename(result)
@@ -1234,7 +1166,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f
nodes = nil
length = actualLength + 1
count = 0
- c.Seek(binary.LittleEndian.AppendUint16(prefix, length))
+ c.Seek(append(prefix, byte(length), byte(length>>8)))
c.Prev() // c.Next() will be performed by for loop
}
}
@@ -1354,7 +1286,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
return nil
})
if err != nil {
- return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
+ return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
}
success = true
return ids, nil
@@ -1498,7 +1430,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
var contID cidSDK.ID
if err := contID.Decode(k[:32]); err != nil {
- return fmt.Errorf("decode container ID: %w", err)
+ return fmt.Errorf("failed to decode containerID: %w", err)
}
res.Items = append(res.Items, ContainerIDTreeID{
CID: contID,
@@ -1506,7 +1438,8 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
})
if len(res.Items) == batchSize {
- res.NextPageToken = bytes.Clone(k)
+ res.NextPageToken = make([]byte, len(k))
+ copy(res.NextPageToken, k)
break
}
}
@@ -1519,7 +1452,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
return &res, nil
}
-func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) {
+func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) {
c := bTree.Cursor()
var curNodes []Node
@@ -1542,14 +1475,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin
}
if len(nextNodes) == 0 {
- return i, curNodes
+ return i, curNodes, nil
}
}
- return len(path), nextNodes
+ return len(path), nextNodes, nil
}
-func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) {
+func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) {
c := bTree.Cursor()
var curNode Node
@@ -1569,10 +1502,10 @@ loop:
childKey, value = c.Next()
}
- return i, curNode
+ return i, curNode, nil
}
- return len(path), curNode
+ return len(path), curNode, nil
}
func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
@@ -1582,12 +1515,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
lm.Child = binary.LittleEndian.Uint64(data)
lm.Parent = binary.LittleEndian.Uint64(data[8:])
- return lm.FromBytes(data[16:])
+ return lm.Meta.FromBytes(data[16:])
}
func (t *boltForest) logToBytes(lm *Move) []byte {
w := io.NewBufBinWriter()
- size := 8 + 8 + lm.Size() + 1
+ size := 8 + 8 + lm.Meta.Size() + 1
// if lm.HasOld {
// size += 8 + lm.Old.Meta.Size()
// }
@@ -1595,7 +1528,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
w.Grow(size)
w.WriteU64LE(lm.Child)
w.WriteU64LE(lm.Parent)
- lm.EncodeBinary(w.BinWriter)
+ lm.Meta.EncodeBinary(w.BinWriter)
// w.WriteBool(lm.HasOld)
// if lm.HasOld {
// w.WriteU64LE(lm.Old.Parent)
@@ -1657,7 +1590,7 @@ func internalKeyPrefix(key []byte, k string) []byte {
key = append(key, 'i')
l := len(k)
- key = binary.LittleEndian.AppendUint16(key, uint16(l))
+ key = append(key, byte(l), byte(l>>8))
key = append(key, k...)
return key
}
@@ -1672,10 +1605,14 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte {
key = internalKeyPrefix(key, k)
l := len(v)
- key = binary.LittleEndian.AppendUint16(key, uint16(l))
+ key = append(key, byte(l), byte(l>>8))
key = append(key, v...)
- key = binary.LittleEndian.AppendUint64(key, parent)
- key = binary.LittleEndian.AppendUint64(key, node)
+ var raw [8]byte
+ binary.LittleEndian.PutUint64(raw[:], parent)
+ key = append(key, raw[:]...)
+
+ binary.LittleEndian.PutUint64(raw[:], node)
+ key = append(key, raw[:]...)
return key
}
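
Reviewer note on the internalKey hunks: append(key, byte(l), byte(l>>8)) is the hand-rolled little-endian equivalent of binary.LittleEndian.AppendUint16(key, uint16(l)), so this revert does not change the on-disk key format. A quick illustrative check:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	for _, l := range []int{0, 1, 255, 256, 65535} {
		a := binary.LittleEndian.AppendUint16(nil, uint16(l))
		b := append([]byte(nil), byte(l), byte(l>>8))
		fmt.Println(l, bytes.Equal(a, b)) // always true
	}
}

The same holds for the 8-byte case: the PutUint64-into-a-temp-array-then-append form produces exactly the bytes of binary.LittleEndian.AppendUint64.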
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index ebfd0bcc0..bb5c22e51 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "slices"
"sort"
"strings"
@@ -85,7 +84,8 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
s.operations = append(s.operations, op)
}
- mCopy := slices.Clone(m)
+ mCopy := make([]KeyValue, len(m))
+ copy(mCopy, m)
op := s.do(&Move{
Parent: node,
Meta: Meta{
@@ -111,16 +111,7 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o
return s.Apply(op)
}
-func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error {
- for _, op := range ops {
- if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (f *memoryForest) Init(context.Context) error {
+func (f *memoryForest) Init() error {
return nil
}
@@ -128,11 +119,11 @@ func (f *memoryForest) Open(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) SetMode(context.Context, mode.Mode) error {
+func (f *memoryForest) SetMode(mode.Mode) error {
return nil
}
-func (f *memoryForest) Close(context.Context) error {
+func (f *memoryForest) Close() error {
return nil
}
func (f *memoryForest) SetParentID(string) {}
@@ -164,7 +155,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
}
// TreeSortedByFilename implements the Forest interface.
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
+func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -177,7 +168,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
var res []NodeInfo
for _, nodeID := range nodeIDs {
- children := s.getChildren(nodeID)
+ children := s.tree.getChildren(nodeID)
for _, childID := range children {
var found bool
for _, kv := range s.infoMap[childID].Meta.Items {
@@ -204,14 +195,17 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
r := mergeNodeInfos(res)
for i := range r {
- if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() {
- finish := min(len(res), i+count)
+ if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start {
+ finish := i + count
+ if len(res) < finish {
+ finish = len(res)
+ }
last := string(findAttr(r[finish-1].Meta, AttributeFilename))
- return r[i:finish], NewCursor(last, 0), nil
+ return r[i:finish], &last, nil
}
}
last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
- return nil, NewCursor(last, 0), nil
+ return nil, &last, nil
}
// TreeGetChildren implements the Forest interface.
@@ -222,7 +216,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
return nil, ErrTreeNotFound
}
- children := s.getChildren(nodeID)
+ children := s.tree.getChildren(nodeID)
res := make([]NodeInfo, 0, len(children))
for _, childID := range children {
res = append(res, NodeInfo{
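
Reviewer note: these hunks return TreeSortedByFilename to a plain *string last-filename cursor. A simplified sketch of that pagination contract over a sorted string slice (illustrative types, not the real MultiNodeInfo):

package main

import (
	"fmt"
	"sort"
)

// page returns up to count names strictly greater than *start (or from the
// beginning if start is nil) and a cursor for the next call.
func page(names []string, start *string, count int) ([]string, *string) {
	sort.Strings(names)
	i := 0
	if start != nil {
		i = sort.SearchStrings(names, *start)
		// skip the cursor element itself: results are strictly after it
		for i < len(names) && names[i] <= *start {
			i++
		}
	}
	end := min(i+count, len(names))
	if i >= end {
		return nil, start
	}
	last := names[end-1]
	return names[i:end], &last
}

func main() {
	names := []string{"a", "b", "c", "d", "e"}
	var cur *string
	for {
		res, next := page(names, cur, 2)
		if len(res) == 0 {
			break
		}
		fmt.Println(res)
		cur = next
	}
}

Worth noting: a bare string cursor cannot distinguish multiple nodes that share the last filename — exactly the case the removed Cursor type (filename plus node) was introduced to handle, per the comment deleted from heap.go below.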
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 844084c55..fbcc53fb3 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -30,7 +30,7 @@ var providers = []struct {
{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
f := NewMemoryForest()
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
return f
}},
{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
@@ -40,7 +40,7 @@ var providers = []struct {
WithMaxBatchSize(1),
}, opts...)...)
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
return f
}},
}
@@ -61,7 +61,7 @@ func TestForest_TreeMove(t *testing.T) {
}
func testForestTreeMove(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -125,7 +125,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
}
func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -247,7 +247,7 @@ func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
}
func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
+ treeAppend := func(t *testing.T, last *string, count int) *string {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -302,7 +302,7 @@ func TestForest_TreeSortedIteration(t *testing.T) {
}
func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
+ treeAppend := func(t *testing.T, last *string, count int) *string {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -361,7 +361,7 @@ func TestForest_TreeSortedFilename(t *testing.T) {
}
func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
const controlAttr = "control_attr"
cid := cidtest.ID()
@@ -453,7 +453,7 @@ func TestForest_TreeDrop(t *testing.T) {
}
func testForestTreeDrop(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
const cidsSize = 3
var cids [cidsSize]cidSDK.ID
@@ -523,7 +523,7 @@ func TestForest_TreeAdd(t *testing.T) {
}
func testForestTreeAdd(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -571,7 +571,7 @@ func TestForest_TreeAddByPath(t *testing.T) {
}
func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -709,7 +709,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
t.Run("add a child, then insert a parent removal", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
@@ -722,7 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
testApply(t, s, 11, 10, meta)
@@ -792,7 +792,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
t.Run("expected", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
for i := range logs {
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
@@ -801,7 +801,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
})
s := constructor(t, WithMaxBatchSize(batchSize))
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
for range batchSize {
@@ -842,7 +842,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
}
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
t.Run("empty log, no panic", func(t *testing.T) {
_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
@@ -883,7 +883,7 @@ func TestForest_TreeExists(t *testing.T) {
func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
actual, err := s.TreeExists(context.Background(), cid, treeID)
@@ -942,7 +942,7 @@ func TestApplyTricky1(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1005,7 +1005,7 @@ func TestApplyTricky2(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1115,7 +1115,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close(context.Background())) }()
+ defer func() { require.NoError(t, expected.Close()) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1145,7 +1145,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
wg.Wait()
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close(context.Background()))
+ require.NoError(t, actual.Close())
}
}
@@ -1163,7 +1163,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close(context.Background())) }()
+ defer func() { require.NoError(t, expected.Close()) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1179,7 +1179,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close(context.Background()))
+ require.NoError(t, actual.Close())
}
}
@@ -1197,7 +1197,7 @@ func BenchmarkApplySequential(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close(context.Background())) }()
+ defer func() { require.NoError(b, s.Close()) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1233,7 +1233,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close(context.Background())) }()
+ defer func() { require.NoError(b, s.Close()) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1290,7 +1290,7 @@ func TestTreeGetByPath(t *testing.T) {
}
func testTreeGetByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
treeID := "version"
@@ -1369,7 +1369,7 @@ func TestGetTrees(t *testing.T) {
}
func testTreeGetTrees(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
d := CIDDescriptor{Position: 0, Size: 1}
@@ -1415,7 +1415,7 @@ func TestTreeLastSyncHeight(t *testing.T) {
}
func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
- defer func() { require.NoError(t, f.Close(context.Background())) }()
+ defer func() { require.NoError(t, f.Close()) }()
cnr := cidtest.ID()
treeID := "someTree"
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
index b035be1e1..5a00bcf7a 100644
--- a/pkg/local_object_storage/pilorama/heap.go
+++ b/pkg/local_object_storage/pilorama/heap.go
@@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any {
// fixedHeap maintains a fixed number of the smallest elements, starting from some point.
type fixedHeap struct {
- start *Cursor
+ start *string
sorted bool
count int
h *filenameHeap
}
-func newHeap(start *Cursor, count int) *fixedHeap {
+func newHeap(start *string, count int) *fixedHeap {
h := new(filenameHeap)
heap.Init(h)
@@ -50,19 +50,8 @@ func newHeap(start *Cursor, count int) *fixedHeap {
const amortizationMultiplier = 5
func (h *fixedHeap) push(id MultiNode, filename string) bool {
- if h.start != nil {
- if filename < h.start.GetFilename() {
- return false
- } else if filename == h.start.GetFilename() {
- // A tree may have a lot of nodes with the same filename but different versions so that
- // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call
- // with the same filename.
- pos := slices.Index(id, h.start.GetNode())
- if pos == -1 || pos+1 >= len(id) {
- return false
- }
- id = id[pos+1:]
- }
+ if h.start != nil && filename <= *h.start {
+ return false
}
*h.h = append(*h.h, heapInfo{id: id, filename: filename})
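
Reviewer note: fixedHeap above is the usual "k smallest items via a bounded heap" construction, with the start filter now reduced to a plain string comparison. A generic standalone sketch using container/heap (independent of the pilorama types):

package main

import (
	"container/heap"
	"fmt"
	"sort"
)

// maxHeap of strings: the largest kept element sits at the root,
// so it is the first candidate for eviction.
type maxHeap []string

func (h maxHeap) Len() int           { return len(h) }
func (h maxHeap) Less(i, j int) bool { return h[i] > h[j] }
func (h maxHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *maxHeap) Push(x any)        { *h = append(*h, x.(string)) }
func (h *maxHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// kSmallest keeps at most k elements greater than start, evicting the
// current maximum whenever a smaller candidate arrives.
func kSmallest(items []string, start string, k int) []string {
	h := &maxHeap{}
	for _, it := range items {
		if it <= start {
			continue // same filter as fixedHeap.push with a non-nil start
		}
		if h.Len() < k {
			heap.Push(h, it)
		} else if it < (*h)[0] {
			heap.Pop(h)
			heap.Push(h, it)
		}
	}
	res := append([]string(nil), *h...)
	sort.Strings(res)
	return res
}

func main() {
	fmt.Println(kSmallest([]string{"d", "a", "c", "b", "e"}, "a", 2)) // [b c]
}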
diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go
index 28b7faec8..ce7b3db1e 100644
--- a/pkg/local_object_storage/pilorama/inmemory.go
+++ b/pkg/local_object_storage/pilorama/inmemory.go
@@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree {
// undo undoes op and changes s in-place.
func (s *memoryTree) undo(op *move) {
if op.HasOld {
- s.infoMap[op.Child] = op.Old
+ s.tree.infoMap[op.Child] = op.Old
} else {
- delete(s.infoMap, op.Child)
+ delete(s.tree.infoMap, op.Child)
}
}
@@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move {
},
}
- shouldPut := !s.isAncestor(op.Child, op.Parent)
- p, ok := s.infoMap[op.Child]
+ shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
+ p, ok := s.tree.infoMap[op.Child]
if ok {
lm.HasOld = true
lm.Old = p
@@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move {
p.Meta = m
p.Parent = op.Parent
- s.infoMap[op.Child] = p
+ s.tree.infoMap[op.Child] = p
return lm
}
@@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
}
var nodes []Node
- var lastTS Timestamp
+ var lastTs Timestamp
children := t.getChildren(curNode)
for i := range children {
@@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
fileName := string(info.Meta.GetAttr(attr))
if fileName == path[len(path)-1] {
if latest {
- if info.Meta.Time >= lastTS {
+ if info.Meta.Time >= lastTs {
nodes = append(nodes[:0], children[i])
}
} else {
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index e1f6cd8e7..61a3849bf 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -21,8 +21,6 @@ type Forest interface {
// TreeApply applies replicated operation from another node.
// If background is true, TreeApply will first check whether the operation already exists.
TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
- // TreeApplyBatch applies replicated operations from another node.
- TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
@@ -37,7 +35,7 @@ type Forest interface {
TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
// TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error)
+ TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
@@ -62,10 +60,10 @@ type Forest interface {
type ForestStorage interface {
// DumpInfo returns information about the pilorama.
DumpInfo() Info
- Init(context.Context) error
+ Init() error
Open(context.Context, mode.Mode) error
- Close(context.Context) error
- SetMode(context.Context, mode.Mode) error
+ Close() error
+ SetMode(m mode.Mode) error
SetParentID(id string)
Forest
@@ -79,38 +77,6 @@ const (
AttributeVersion = "Version"
)
-// Cursor keeps state between function calls for traversing nodes.
-// It stores the attributes associated with a previous call, allowing subsequent operations
-// to resume traversal from this point rather than starting from the beginning.
-type Cursor struct {
- // Last traversed filename.
- filename string
-
- // Last traversed node.
- node Node
-}
-
-func NewCursor(filename string, node Node) *Cursor {
- return &Cursor{
- filename: filename,
- node: node,
- }
-}
-
-func (c *Cursor) GetFilename() string {
- if c == nil {
- return ""
- }
- return c.filename
-}
-
-func (c *Cursor) GetNode() Node {
- if c == nil {
- return Node(0)
- }
- return c.node
-}
-
// CIDDescriptor contains container ID and information about the node position
// in the list of container nodes.
type CIDDescriptor struct {
diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go
index 0c042aa56..01d3da9f0 100644
--- a/pkg/local_object_storage/pilorama/mode_test.go
+++ b/pkg/local_object_storage/pilorama/mode_test.go
@@ -19,13 +19,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close(context.Background()))
+ require.NoError(t, f.Close())
require.NoError(t, f.Open(context.Background(), mode.Degraded))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close(context.Background()))
+ require.NoError(t, f.Close())
}
diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go
index 36d347f10..106ba6ae9 100644
--- a/pkg/local_object_storage/pilorama/multinode.go
+++ b/pkg/local_object_storage/pilorama/multinode.go
@@ -25,10 +25,6 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool {
return true
}
-func (r *MultiNodeInfo) LastChild() Node {
- return r.Children[len(r.Children)-1]
-}
-
func (n NodeInfo) ToMultiNode() MultiNodeInfo {
return MultiNodeInfo{
Children: MultiNode{n.ID},
diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go
index eecee1527..54c2b90a6 100644
--- a/pkg/local_object_storage/pilorama/split_test.go
+++ b/pkg/local_object_storage/pilorama/split_test.go
@@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) {
require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4"))
require.Equal(t, []byte{10}, testGetByPath(t, "value0"))
- testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) {
+ testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) {
res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize)
require.NoError(t, err)
return res, last
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index b4015ae8d..364649b50 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 {
return r.size
}
-func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
+func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
s.m.RLock()
defer s.m.RUnlock()
@@ -34,15 +34,9 @@ func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (Contai
return ContainerSizeRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ContainerSizeRes{}, err
- }
- defer release()
-
size, err := s.metaBase.ContainerSize(prm.cnr)
if err != nil {
- return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
+ return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
}
return ContainerSizeRes{
@@ -75,15 +69,9 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont
return ContainerCountRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ContainerCountRes{}, err
- }
- defer release()
-
counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
if err != nil {
- return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
+ return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err)
}
return ContainerCountRes{
@@ -112,12 +100,6 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.metaBase.DeleteContainerSize(ctx, id)
}
@@ -140,11 +122,5 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.metaBase.DeleteContainerCount(ctx, id)
}
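
Reviewer note: the removed opsLimiter calls above all share one contract — acquire a slot for the request, receive a release func, defer it. A minimal sketch of that contract with a buffered-channel semaphore (hypothetical limiter type; the real qos limiter is more elaborate):

package main

import (
	"context"
	"fmt"
)

// limiter grants at most cap(slots) concurrent requests. ReadRequest blocks
// until a slot frees up or ctx is cancelled, mirroring the
// "release, err := s.opsLimiter.ReadRequest(ctx); defer release()" shape.
type limiter struct{ slots chan struct{} }

func newLimiter(n int) *limiter { return &limiter{slots: make(chan struct{}, n)} }

func (l *limiter) ReadRequest(ctx context.Context) (func(), error) {
	select {
	case l.slots <- struct{}{}:
		return func() { <-l.slots }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	l := newLimiter(1)
	release, err := l.ReadRequest(context.Background())
	if err != nil {
		fmt.Println("limited:", err)
		return
	}
	defer release()
	fmt.Println("request admitted")
}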
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index a607f70f7..62800dbd0 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -20,25 +20,25 @@ import (
"golang.org/x/sync/errgroup"
)
-func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error {
- s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode,
+func (s *Shard) handleMetabaseFailure(stage string, err error) error {
+ s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
- err = s.SetMode(ctx, mode.ReadOnly)
+ err = s.SetMode(mode.ReadOnly)
if err == nil {
return nil
}
- s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode,
+ s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
- err = s.SetMode(ctx, mode.DegradedReadOnly)
+ err = s.SetMode(mode.DegradedReadOnly)
if err != nil {
- return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly)
+ return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly))
}
return nil
}
@@ -72,10 +72,10 @@ func (s *Shard) Open(ctx context.Context) error {
for j := i + 1; j < len(components); j++ {
if err := components[j].Open(ctx, m); err != nil {
// Other components must be opened, fail.
- return fmt.Errorf("open %T: %w", components[j], err)
+ return fmt.Errorf("could not open %T: %w", components[j], err)
}
}
- err = s.handleMetabaseFailure(ctx, "open", err)
+ err = s.handleMetabaseFailure("open", err)
if err != nil {
return err
}
@@ -83,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error {
break
}
- return fmt.Errorf("open %T: %w", component, err)
+ return fmt.Errorf("could not open %T: %w", component, err)
}
}
return nil
@@ -91,8 +91,8 @@ func (s *Shard) Open(ctx context.Context) error {
type metabaseSynchronizer Shard
-func (x *metabaseSynchronizer) Init(ctx context.Context) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init")
+func (x *metabaseSynchronizer) Init() error {
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
@@ -101,24 +101,26 @@ func (x *metabaseSynchronizer) Init(ctx context.Context) error {
// Init initializes all Shard's components.
func (s *Shard) Init(ctx context.Context) error {
m := s.GetMode()
- if err := s.initializeComponents(ctx, m); err != nil {
+ if err := s.initializeComponents(m); err != nil {
return err
}
s.updateMetrics(ctx)
s.gc = &gc{
- gcCfg: &s.gcCfg,
- remover: s.removeGarbage,
- stopChannel: make(chan struct{}),
- newEpochChan: make(chan uint64),
- newEpochHandlers: &newEpochHandlers{
- cancelFunc: func() {},
- handlers: []newEpochHandler{
- s.collectExpiredLocks,
- s.collectExpiredObjects,
- s.collectExpiredTombstones,
- s.collectExpiredMetrics,
+ gcCfg: &s.gcCfg,
+ remover: s.removeGarbage,
+ stopChannel: make(chan struct{}),
+ eventChan: make(chan Event),
+ mEventHandler: map[eventType]*eventHandlers{
+ eventNewEpoch: {
+ cancelFunc: func() {},
+ handlers: []eventHandler{
+ s.collectExpiredLocks,
+ s.collectExpiredObjects,
+ s.collectExpiredTombstones,
+ s.collectExpiredMetrics,
+ },
},
},
}
@@ -136,9 +138,9 @@ func (s *Shard) Init(ctx context.Context) error {
return nil
}
-func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
+func (s *Shard) initializeComponents(m mode.Mode) error {
type initializer interface {
- Init(context.Context) error
+ Init() error
}
var components []initializer
@@ -168,13 +170,13 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
}
for _, component := range components {
- if err := component.Init(ctx); err != nil {
+ if err := component.Init(); err != nil {
if component == s.metaBase {
if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
}
- err = s.handleMetabaseFailure(ctx, "init", err)
+ err = s.handleMetabaseFailure("init", err)
if err != nil {
return err
}
@@ -182,7 +184,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
break
}
- return fmt.Errorf("initialize %T: %w", component, err)
+ return fmt.Errorf("could not initialize %T: %w", component, err)
}
}
return nil
@@ -203,19 +205,19 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
- return fmt.Errorf("reset metabase: %w", err)
+ return fmt.Errorf("could not reset metabase: %w", err)
}
withCount := true
totalObjects, err := s.blobStor.ObjectsCount(ctx)
if err != nil {
- s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
+ s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
withCount = false
}
eg, egCtx := errgroup.WithContext(ctx)
- if s.refillMetabaseWorkersCount > 0 {
- eg.SetLimit(s.refillMetabaseWorkersCount)
+ if s.cfg.refillMetabaseWorkersCount > 0 {
+ eg.SetLimit(s.cfg.refillMetabaseWorkersCount)
}
var completedCount uint64
@@ -252,12 +254,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err = errors.Join(egErr, itErr)
if err != nil {
- return fmt.Errorf("put objects to the meta: %w", err)
+ return fmt.Errorf("could not put objects to the meta: %w", err)
}
err = s.metaBase.SyncCounters()
if err != nil {
- return fmt.Errorf("sync object counters: %w", err)
+ return fmt.Errorf("could not sync object counters: %w", err)
}
success = true
@@ -268,9 +270,9 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
+ s.log.Warn(logs.ShardCouldNotUnmarshalObject,
zap.Stringer("address", addr),
- zap.Error(err))
+ zap.String("err", err.Error()))
return nil
}
@@ -278,12 +280,12 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
var isIndexedContainer bool
if hasIndexedAttribute {
- info, err := s.containerInfo.Info(ctx, addr.Container())
+ info, err := s.containerInfo.Info(addr.Container())
if err != nil {
return err
}
if info.Removed {
- s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
+ s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
return nil
}
isIndexedContainer = info.Indexed
@@ -316,7 +318,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("unmarshal lock content: %w", err)
+ return fmt.Errorf("could not unmarshal lock content: %w", err)
}
locked := make([]oid.ID, lock.NumberOfMembers())
@@ -326,7 +328,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
id, _ := obj.ID()
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
- return fmt.Errorf("lock objects: %w", err)
+ return fmt.Errorf("could not lock objects: %w", err)
}
return nil
}
@@ -335,7 +337,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("unmarshal tombstone content: %w", err)
+ return fmt.Errorf("could not unmarshal tombstone content: %w", err)
}
tombAddr := object.AddressOf(obj)
@@ -356,18 +358,17 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- return fmt.Errorf("inhume objects: %w", err)
+ return fmt.Errorf("could not inhume objects: %w", err)
}
return nil
}
// Close releases all Shard's components.
-func (s *Shard) Close(ctx context.Context) error {
- unlock := s.lockExclusive()
+func (s *Shard) Close() error {
if s.rb != nil {
- s.rb.Stop(ctx, s.log)
+ s.rb.Stop(s.log)
}
- var components []interface{ Close(context.Context) error }
+ var components []interface{ Close() error }
if s.pilorama != nil {
components = append(components, s.pilorama)
@@ -383,23 +384,15 @@ func (s *Shard) Close(ctx context.Context) error {
var lastErr error
for _, component := range components {
- if err := component.Close(ctx); err != nil {
+ if err := component.Close(); err != nil {
lastErr = err
- s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err))
+ s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
- if s.opsLimiter != nil {
- s.opsLimiter.Close()
- }
-
- unlock()
-
- // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock.
- // So to prevent deadlock GC stopping is outside of exclusive lock.
// If Init/Open was unsuccessful, gc can be nil.
if s.gc != nil {
- s.gc.stop(ctx)
+ s.gc.stop()
}
return lastErr
@@ -421,18 +414,18 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
unlock := s.lockExclusive()
defer unlock()
- s.rb.Stop(ctx, s.log)
+ s.rb.Stop(s.log)
if !s.info.Mode.NoMetabase() {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
}
- ok, err := s.metaBase.Reload(ctx, c.metaOpts...)
+ ok, err := s.metaBase.Reload(c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
- s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
- _ = s.setMode(ctx, mode.DegradedReadOnly)
+ s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
+ _ = s.setMode(mode.DegradedReadOnly)
}
return err
}
@@ -444,28 +437,15 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
// config after the node was updated.
err = s.refillMetabase(ctx)
} else {
- err = s.metaBase.Init(ctx)
+ err = s.metaBase.Init()
}
if err != nil {
- s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
- _ = s.setMode(ctx, mode.DegradedReadOnly)
+ s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
+ _ = s.setMode(mode.DegradedReadOnly)
return err
}
}
- if err := s.setMode(ctx, c.info.Mode); err != nil {
- return err
- }
- s.reloadOpsLimiter(&c)
-
- return nil
-}
-
-func (s *Shard) reloadOpsLimiter(c *cfg) {
- if c.configOpsLimiter != nil {
- old := s.opsLimiter.ptr.Swap(&qosLimiterHolder{Limiter: c.configOpsLimiter})
- old.Close()
- s.opsLimiter.SetParentID(s.info.ID.String())
- }
+ return s.setMode(c.info.Mode)
}
func (s *Shard) lockExclusive() func() {
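
Shard.Close above gathers its optional components behind a small interface{ Close() error } and closes them in order, keeping only the last error so one failing component cannot skip the rest. Below is a minimal, self-contained sketch of that aggregation pattern; the closer/noisy names are illustrative stand-ins, not the shard's real types.

package main

import (
	"errors"
	"fmt"
)

type closer interface{ Close() error }

type noisy struct{ name string }

func (n noisy) Close() error {
	if n.name == "bad" {
		return errors.New("close failed: " + n.name)
	}
	fmt.Println("closed", n.name)
	return nil
}

// closeAll closes every component in order and remembers only the last
// error, mirroring how Shard.Close drains pilorama, writecache, blobstor
// and metabase without stopping at the first failure.
func closeAll(components []closer) error {
	var lastErr error
	for _, c := range components {
		if err := c.Close(); err != nil {
			lastErr = err // log and continue; one failure must not skip the rest
		}
	}
	return lastErr
}

func main() {
	fmt.Println(closeAll([]closer{noisy{"pilorama"}, noisy{"bad"}, noisy{"metabase"}}))
}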
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 6d2cd7137..b8f1d4417 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -86,7 +86,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadWrite, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
// Metabase can be opened in read-only => start in ReadOnly mode.
allowedMode.Store(int64(os.O_RDONLY))
@@ -95,9 +95,9 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite))
+ require.Error(t, sh.SetMode(mode.ReadWrite))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
// Metabase is corrupted => start in DegradedReadOnly mode.
allowedMode.Store(math.MaxInt64)
@@ -106,7 +106,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.DegradedReadOnly, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}
func TestRefillMetabaseCorrupted(t *testing.T) {
@@ -146,7 +146,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
putPrm.SetObject(obj)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
addr := object.AddressOf(obj)
// This is copied from `fstree.treePath()` to avoid exporting the function just for tests.
@@ -170,7 +170,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
getPrm.SetAddress(addr)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}
func TestRefillMetabase(t *testing.T) {
@@ -358,7 +358,7 @@ func TestRefillMetabase(t *testing.T) {
phyBefore := c.Phy
logicalBefore := c.Logic
- err = sh.Close(context.Background())
+ err = sh.Close()
require.NoError(t, err)
sh = New(
@@ -379,7 +379,7 @@ func TestRefillMetabase(t *testing.T) {
// initialize Blobstor
require.NoError(t, sh.Init(context.Background()))
- defer sh.Close(context.Background())
+ defer sh.Close()
checkAllObjs(false)
checkObj(object.AddressOf(tombObj), nil)
diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go
index 8dc1f0522..b3bc6a30b 100644
--- a/pkg/local_object_storage/shard/count.go
+++ b/pkg/local_object_storage/shard/count.go
@@ -23,12 +23,6 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) {
return 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
-
cc, err := s.metaBase.ObjectCounters()
if err != nil {
return 0, err
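
The removed opsLimiter.ReadRequest calls in this and the following files all share one acquire/defer-release discipline. A minimal sketch of that contract, assuming a weighted semaphore as a stand-in for the shard's real QoS limiter:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// readLimiter mimics the acquire/release contract seen in the removed
// opsLimiter calls: ReadRequest blocks until capacity is available and
// hands back a release func that the caller defers.
type readLimiter struct{ sem *semaphore.Weighted }

func (l *readLimiter) ReadRequest(ctx context.Context) (func(), error) {
	if err := l.sem.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { l.sem.Release(1) }, nil
}

func main() {
	l := &readLimiter{sem: semaphore.NewWeighted(2)}
	release, err := l.ReadRequest(context.Background())
	if err != nil {
		fmt.Println("limited:", err)
		return
	}
	defer release()
	fmt.Println("doing a metabase read under the limiter")
}

Callers then wrap metabase reads exactly the way the deleted lines did: acquire, defer release(), perform the read.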
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index 0101817a8..c898fdf41 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -54,12 +55,6 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del
return DeleteRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return DeleteRes{}, err
- }
- defer release()
-
result := DeleteRes{}
for _, addr := range prm.addr {
select {
@@ -100,7 +95,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr
}
_, err := s.writeCache.Head(ctx, addr)
if err == nil {
- s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
+ s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
return fmt.Errorf("object %s must be flushed from writecache", addr)
}
if client.IsErrObjectNotFound(err) {
@@ -115,9 +110,10 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
- s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
+ s.log.Debug(logs.StorageIDRetrievalFailure,
zap.Stringer("object", addr),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}
storageID := res.StorageID()
@@ -134,9 +130,10 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
_, err = s.blobStor.Delete(ctx, delPrm)
if err != nil && !client.IsErrObjectNotFound(err) {
- s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
+ s.log.Debug(logs.ObjectRemovalFailureBlobStor,
zap.Stringer("object_address", addr),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}
return nil
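
deleteFromBlobstor treats a missing object as success: the GC path may race with other removals, so only real I/O failures propagate. A tiny sketch of that error policy, with errNotFound standing in for the SDK's object-not-found status:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// deleteTolerant mirrors deleteFromBlobstor's policy: a missing object is
// not a failure, while any other error is returned to the caller.
func deleteTolerant(del func() error) error {
	if err := del(); err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	return nil
}

func main() {
	fmt.Println(deleteTolerant(func() error { return errNotFound }))      // <nil>
	fmt.Println(deleteTolerant(func() error { return errors.New("io") })) // io
}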
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index c9ce93bc5..574250a93 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -37,7 +37,7 @@ func TestShard_Delete_BigObject(t *testing.T) {
func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 2c11b6b01..784bf293a 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -18,7 +18,7 @@ type ExistsPrm struct {
// Exists option to set object checked for existence.
Address oid.Address
// Exists option to set parent object checked for existence.
- ECParentAddress oid.Address
+ ParentAddress oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -53,6 +53,10 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
))
defer span.End()
+ var exists bool
+ var locked bool
+ var err error
+
s.m.RLock()
defer s.m.RUnlock()
@@ -60,18 +64,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
return ExistsRes{}, ErrShardDisabled
} else if s.info.EvacuationInProgress {
return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ExistsRes{}, err
- }
- defer release()
-
- var exists bool
- var locked bool
-
- if s.info.Mode.NoMetabase() {
+ } else if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
p.Address = prm.Address
@@ -81,7 +74,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
} else {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(prm.Address)
- existsPrm.SetECParent(prm.ECParentAddress)
+ existsPrm.SetParent(prm.ParentAddress)
var res meta.ExistsRes
res, err = s.metaBase.Exists(ctx, existsPrm)
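
The rewritten Exists is essentially a four-way dispatch on shard state. A condensed sketch of that branch order, with plain booleans and callbacks standing in for the shard's mode checks and storage calls:

package main

import (
	"errors"
	"fmt"
)

var errShardDisabled = errors.New("shard disabled")

// existsDispatch condenses the branch order of Shard.Exists: a disabled
// shard fails fast, evacuation reports "not found", a metabase-less mode
// falls back to the blobstor, and the normal path asks the metabase.
func existsDispatch(disabled, evacuating, noMetabase bool,
	inBlobstor, inMetabase func() (bool, error),
) (bool, error) {
	switch {
	case disabled:
		return false, errShardDisabled
	case evacuating:
		return false, nil // surfaced to clients as ObjectNotFound
	case noMetabase:
		return inBlobstor()
	default:
		return inMetabase()
	}
}

func main() {
	ok, err := existsDispatch(false, false, true,
		func() (bool, error) { return true, nil },
		func() (bool, error) { return false, nil })
	fmt.Println(ok, err)
}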
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index a262a52cb..d605746e8 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -6,13 +6,11 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -33,14 +31,41 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
-type newEpochHandler func(context.Context, uint64)
+// Event represents a class of external events.
+type Event interface {
+ typ() eventType
+}
-type newEpochHandlers struct {
+type eventType int
+
+const (
+ _ eventType = iota
+ eventNewEpoch
+)
+
+type newEpoch struct {
+ epoch uint64
+}
+
+func (e newEpoch) typ() eventType {
+ return eventNewEpoch
+}
+
+// EventNewEpoch returns a new epoch event.
+func EventNewEpoch(e uint64) Event {
+ return newEpoch{
+ epoch: e,
+ }
+}
+
+type eventHandler func(context.Context, Event)
+
+type eventHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
- handlers []newEpochHandler
+ handlers []eventHandler
}
type gcRunResult struct {
@@ -82,10 +107,10 @@ type gc struct {
remover func(context.Context) gcRunResult
- // newEpochChan is used only for listening for the new epoch event.
+ // eventChan is used only for listening for the new epoch event.
// It is OK to keep it open; we listen for context cancellation when writing to it.
- newEpochChan chan uint64
- newEpochHandlers *newEpochHandlers
+ eventChan chan Event
+ mEventHandler map[eventType]*eventHandlers
}
type gcCfg struct {
@@ -106,7 +131,7 @@ type gcCfg struct {
func defaultGCCfg() gcCfg {
return gcCfg{
removerInterval: 10 * time.Second,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
workerPoolInit: func(int) util.WorkerPool {
return nil
},
@@ -115,8 +140,16 @@ func defaultGCCfg() gcCfg {
}
func (gc *gc) init(ctx context.Context) {
- gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
+ sz := 0
+
+ for _, v := range gc.mEventHandler {
+ sz += len(v.handlers)
+ }
+
+ if sz > 0 {
+ gc.workerPool = gc.workerPoolInit(sz)
+ }
+
gc.wg.Add(2)
go gc.tickRemover(ctx)
go gc.listenEvents(ctx)
@@ -128,14 +161,14 @@ func (gc *gc) listenEvents(ctx context.Context) {
for {
select {
case <-gc.stopChannel:
- gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
+ gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel)
return
case <-ctx.Done():
- gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
+ gc.log.Warn(logs.ShardStopEventListenerByContext)
return
- case event, ok := <-gc.newEpochChan:
+ case event, ok := <-gc.eventChan:
if !ok {
- gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
+ gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel)
return
}
@@ -144,38 +177,43 @@ func (gc *gc) listenEvents(ctx context.Context) {
}
}
-func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
- gc.newEpochHandlers.cancelFunc()
- gc.newEpochHandlers.prevGroup.Wait()
+func (gc *gc) handleEvent(ctx context.Context, event Event) {
+ v, ok := gc.mEventHandler[event.typ()]
+ if !ok {
+ return
+ }
+
+ v.cancelFunc()
+ v.prevGroup.Wait()
var runCtx context.Context
- runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
+ runCtx, v.cancelFunc = context.WithCancel(ctx)
- gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
+ v.prevGroup.Add(len(v.handlers))
- for i := range gc.newEpochHandlers.handlers {
+ for i := range v.handlers {
select {
case <-ctx.Done():
return
default:
}
- h := gc.newEpochHandlers.handlers[i]
+ h := v.handlers[i]
err := gc.workerPool.Submit(func() {
- defer gc.newEpochHandlers.prevGroup.Done()
- h(runCtx, epoch)
+ defer v.prevGroup.Done()
+ h(runCtx, event)
})
if err != nil {
- gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
- zap.Error(err),
+ gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
+ zap.String("error", err.Error()),
)
- gc.newEpochHandlers.prevGroup.Done()
+ v.prevGroup.Done()
}
}
}
-func (gc *gc) releaseResources(ctx context.Context) {
+func (gc *gc) releaseResources() {
if gc.workerPool != nil {
gc.workerPool.Release()
}
@@ -184,7 +222,7 @@ func (gc *gc) releaseResources(ctx context.Context) {
// because it is possible that we close it earlier than we stop writing.
// It is OK to keep it open.
- gc.log.Debug(ctx, logs.ShardGCIsStopped)
+ gc.log.Debug(logs.ShardGCIsStopped)
}
func (gc *gc) tickRemover(ctx context.Context) {
@@ -198,10 +236,10 @@ func (gc *gc) tickRemover(ctx context.Context) {
case <-ctx.Done():
// Context was canceled before we started to close shards.
// It makes sense to stop collecting garbage on context cancellation too.
- gc.releaseResources(ctx)
+ gc.releaseResources()
return
case <-gc.stopChannel:
- gc.releaseResources(ctx)
+ gc.releaseResources()
return
case <-timer.C:
startedAt := time.Now()
@@ -220,16 +258,13 @@ func (gc *gc) tickRemover(ctx context.Context) {
}
}
-func (gc *gc) stop(ctx context.Context) {
+func (gc *gc) stop() {
gc.onceStop.Do(func() {
close(gc.stopChannel)
})
- gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
+ gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
-
- gc.newEpochHandlers.cancelFunc()
- gc.newEpochHandlers.prevGroup.Wait()
}
// iterates over metabase and deletes objects
@@ -251,47 +286,8 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
return
}
- s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
- defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
-
- buf, err := s.getGarbage(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
- zap.Error(err),
- )
-
- return
- } else if len(buf) == 0 {
- result.success = true
- return
- }
-
- var deletePrm DeletePrm
- deletePrm.SetAddresses(buf...)
-
- // delete accumulated objects
- res, err := s.delete(ctx, deletePrm, true)
-
- result.deleted = res.deleted
- result.failedToDelete = uint64(len(buf)) - res.deleted
- result.success = true
-
- if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
- zap.Error(err),
- )
- result.success = false
- }
-
- return
-}
-
-func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
+ s.log.Debug(logs.ShardGCRemoveGarbageStarted)
+ defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted)
buf := make([]oid.Address, 0, s.rmBatchSize)
@@ -312,20 +308,47 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
return nil
})
- if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil {
- return nil, err
+ // iterate over metabase's objects with GC mark
+ // (no more than s.rmBatchSize objects)
+ err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
+ if err != nil {
+ s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ } else if len(buf) == 0 {
+ result.success = true
+ return
}
- return buf, nil
-}
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(buf...)
+
+ // delete accumulated objects
+ res, err := s.delete(ctx, deletePrm, true)
+
+ result.deleted = res.deleted
+ result.failedToDelete = uint64(len(buf)) - res.deleted
+ result.success = true
+
+ if err != nil {
+ s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
+ zap.String("error", err.Error()),
+ )
+ result.success = false
+ }
-func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
- workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
- batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
return
}
-func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
+func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
+ workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
+ batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
+ return
+}
+
+func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -333,8 +356,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
}()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -343,7 +366,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
batch = append(batch, o.Address())
@@ -373,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
+ s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
}
}
@@ -391,25 +414,24 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
return
}
- s.handleExpiredObjectsUnsafe(ctx, expired)
-}
-
-func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) {
- select {
- case <-ctx.Done():
- return
- default:
- }
-
expired, err := s.getExpiredWithLinked(ctx, expired)
if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
+ s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
return
}
- res, err := s.inhumeGC(ctx, expired)
+ var inhumePrm meta.InhumePrm
+
+ inhumePrm.SetAddresses(expired...)
+ inhumePrm.SetGCMark()
+
+ // inhume the collected objects
+ res, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err))
+ s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
+ zap.String("error", err.Error()),
+ )
+
return
}
@@ -427,12 +449,6 @@ func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Ad
}
func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
-
result := make([]oid.Address, 0, len(source))
parentToChildren, err := s.metaBase.GetChildren(ctx, source)
if err != nil {
@@ -446,20 +462,7 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address)
return result, nil
}
-func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) {
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return meta.InhumeRes{}, err
- }
- defer release()
-
- var inhumePrm meta.InhumePrm
- inhumePrm.SetAddresses(addrs...)
- inhumePrm.SetGCMark()
- return s.metaBase.Inhume(ctx, inhumePrm)
-}
-
-func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -467,10 +470,11 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
}()
+ epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
- log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
- defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)
+ log.Debug(logs.ShardStartedExpiredTombstonesHandling)
+ defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -488,29 +492,22 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
})
for {
- log.Debug(ctx, logs.ShardIteratingTombstones)
+ log.Debug(logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
- s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
+ s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
}
- var release qos.ReleaseFunc
- release, err = s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
- s.m.RUnlock()
- return
- }
err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
- release()
if err != nil {
- log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
+
return
}
@@ -527,7 +524,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
- log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+ log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
if len(tssExp) > 0 {
s.expiredTombstonesCallback(ctx, tssExp)
}
@@ -538,7 +535,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
-func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -546,8 +543,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
}()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -557,14 +554,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() == objectSDK.TypeLock {
batch = append(batch, o.Address())
if len(batch) == batchSize {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, epoch, expired)
+ s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
batch = make([]oid.Address, 0, batchSize)
@@ -578,7 +575,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
if len(batch) > 0 {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, epoch, expired)
+ s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
}
@@ -587,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
+ s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
}
}
@@ -599,13 +596,7 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
return ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
+ err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
select {
case <-ctx.Done():
return meta.ErrInterruptIterator
@@ -621,11 +612,12 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
}
func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
}
- defer release()
return s.metaBase.FilterExpired(ctx, epoch, addresses)
}
@@ -635,22 +627,28 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
//
// Does not modify tss.
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
+ if s.GetMode().NoMetabase() {
return
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
- return
+ // Mark tombstones as garbage.
+ var pInhume meta.InhumePrm
+
+ tsAddrs := make([]oid.Address, 0, len(tss))
+ for _, ts := range tss {
+ tsAddrs = append(tsAddrs, ts.Tombstone())
}
- res, err := s.metaBase.InhumeTombstones(ctx, tss)
- release()
+
+ pInhume.SetGCMark()
+ pInhume.SetAddresses(tsAddrs...)
+
+ // inhume tombstones
+ res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
+ s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
+ zap.String("error", err.Error()),
+ )
+
return
}
@@ -665,27 +663,26 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size))
i++
}
+
+ // drop the just-processed expired tombstones
+ // from the graveyard
+ err = s.metaBase.DropGraves(ctx, tss)
+ if err != nil {
+ s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
+ }
}
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return
- }
-
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ if s.GetMode().NoMetabase() {
return
}
unlocked, err := s.metaBase.FreeLockedBy(lockers)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
+ zap.String("error", err.Error()),
+ )
return
}
@@ -693,15 +690,13 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
var pInhume meta.InhumePrm
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
- release, err = s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
- return
- }
+
res, err := s.metaBase.Inhume(ctx, pInhume)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
+ zap.String("error", err.Error()),
+ )
+
return
}
@@ -723,7 +718,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
return
}
@@ -731,57 +726,47 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
return
}
- s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked)
+ s.handleExpiredObjects(ctx, expiredUnlocked)
}
// HandleDeletedLocks unlocks all objects which were locked by lockers.
-func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
+func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
+ if s.GetMode().NoMetabase() {
return
}
- release, err := s.opsLimiter.WriteRequest(ctx)
+ _, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
- return
- }
- _, err = s.metaBase.FreeLockedBy(lockers)
- release()
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
+ zap.String("error", err.Error()),
+ )
+
return
}
}
-// NotificationChannel returns channel for new epoch events.
-func (s *Shard) NotificationChannel() chan<- uint64 {
- return s.gc.newEpochChan
+// NotificationChannel returns a channel for shard events.
+func (s *Shard) NotificationChannel() chan<- Event {
+ return s.gc.eventChan
}
-func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
defer span.End()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
+ epoch := e.(newEpoch).epoch
+
+ s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
s.collectExpiredContainerSizeMetrics(ctx, epoch)
s.collectExpiredContainerCountMetrics(ctx, epoch)
}
func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
ids, err := s.metaBase.ZeroSizeContainers(ctx)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
@@ -791,15 +776,9 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui
}
func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
ids, err := s.metaBase.ZeroCountContainers(ctx)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
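
The core of the new gc event machinery is handleEvent: cancel the previous handler run, wait for it to drain, then launch every handler for the fresh event under a new cancelable context. A self-contained sketch of that pattern, using bare goroutines where the shard submits to its worker pool:

package main

import (
	"context"
	"fmt"
	"sync"
)

type eventType int

const eventNewEpoch eventType = 1

type event interface{ typ() eventType }

type newEpoch struct{ epoch uint64 }

func (newEpoch) typ() eventType { return eventNewEpoch }

type handlers struct {
	prevGroup  sync.WaitGroup
	cancelFunc context.CancelFunc
	fns        []func(context.Context, event)
}

// handle mirrors gc.handleEvent: cancel the previous run, wait for it to
// drain, then start every handler for the new event under a fresh context.
func (h *handlers) handle(ctx context.Context, e event) {
	h.cancelFunc()
	h.prevGroup.Wait()

	var runCtx context.Context
	runCtx, h.cancelFunc = context.WithCancel(ctx)

	h.prevGroup.Add(len(h.fns))
	for _, fn := range h.fns {
		go func(fn func(context.Context, event)) {
			defer h.prevGroup.Done()
			fn(runCtx, e)
		}(fn)
	}
}

func main() {
	h := &handlers{cancelFunc: func() {}}
	h.fns = append(h.fns, func(_ context.Context, e event) {
		fmt.Println("epoch:", e.(newEpoch).epoch)
	})
	h.handle(context.Background(), newEpoch{epoch: 105})
	h.prevGroup.Wait()
}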
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 54d2f1510..11db5e54e 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -37,8 +37,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -62,8 +61,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
meta.WithEpochState(epochState{}),
),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
@@ -80,7 +79,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
sh = New(opts...)
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index f512a488a..2b97111e7 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -34,7 +34,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
@@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
require.NoError(t, err)
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), epoch.Value)
+ sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
@@ -131,7 +131,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
@@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), epoch.Value)
+ sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")
@@ -190,7 +190,7 @@ func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
obj := testutil.GenerateObjectWithSize(1024)
@@ -254,7 +254,7 @@ func TestGCDontDeleteObjectFromWritecache(t *testing.T) {
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
obj := testutil.GenerateObjectWithSize(1024)
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 28f8912be..d1c393613 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -10,6 +10,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -111,12 +112,6 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return c.Get(ctx, prm.addr)
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return GetRes{}, err
- }
- defer release()
-
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
@@ -149,7 +144,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
} else {
- s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
+ s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@@ -158,14 +153,16 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return res, false, err
}
if client.IsErrObjectNotFound(err) {
- s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
+ s.log.Debug(logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta))
+ zap.Bool("skip_meta", skipMeta),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
} else {
- s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
+ s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta))
+ zap.Bool("skip_meta", skipMeta),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
if skipMeta || mErr != nil {
@@ -178,7 +175,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
- return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
+ return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
}
storageID := mExRes.StorageID()
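
Several log sites above now attach a trace_id field via the imported tracingPkg.GetTraceID(ctx). One plausible implementation of such a helper, assuming OpenTelemetry span propagation (the real pkg/tracing code may differ):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// traceID pulls the span context out of ctx and renders its trace ID.
// Without an active span this yields the all-zero trace ID.
func traceID(ctx context.Context) string {
	return trace.SpanContextFromContext(ctx).TraceID().String()
}

func main() {
	fmt.Println(traceID(context.Background()))
}

With such a helper, a log call becomes zap.String("trace_id", traceID(ctx)), matching the hunks above.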
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 837991b73..d0eecf74e 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -30,7 +30,7 @@ func TestShard_Get(t *testing.T) {
func testShardGet(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
var putPrm PutPrm
var getPrm GetPrm
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 34b8290d6..ff57e3bf9 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -81,12 +81,6 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
- release, limitErr := s.opsLimiter.ReadRequest(ctx)
- if limitErr != nil {
- return HeadRes{}, limitErr
- }
- defer release()
-
var res meta.GetRes
res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index deb3019df..c65bbb1e3 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -28,7 +28,7 @@ func TestShard_Head(t *testing.T) {
func testShardHead(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
var putPrm PutPrm
var headPrm HeadPrm
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 7391adef2..a72313498 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -1,11 +1,11 @@
package shard
import (
- "context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/mr-tron/base58"
"go.uber.org/zap"
)
@@ -31,12 +31,12 @@ func (s *Shard) ID() *ID {
}
// UpdateID reads shard ID saved in the metabase and updates it if it is missing.
-func (s *Shard) UpdateID(ctx context.Context) (err error) {
+func (s *Shard) UpdateID() (err error) {
var idFromMetabase []byte
modeDegraded := s.GetMode().NoMetabase()
if !modeDegraded {
- if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
- err = fmt.Errorf("read shard id from metabase: %w", err)
+ if idFromMetabase, err = s.metaBase.GetShardID(mode.ReadOnly); err != nil {
+ err = fmt.Errorf("failed to read shard id from metabase: %w", err)
}
}
@@ -45,12 +45,12 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
}
shardID := s.info.ID.String()
- s.metricsWriter.SetShardID(shardID)
+ s.cfg.metricsWriter.SetShardID(shardID)
if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
s.writeCache.GetMetrics().SetShardID(shardID)
}
- s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
+ s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))}
s.metaBase.SetLogger(s.log)
s.blobStor.SetLogger(s.log)
if s.hasWriteCache() {
@@ -61,11 +61,10 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
if s.pilorama != nil {
s.pilorama.SetParentID(s.info.ID.String())
}
- s.opsLimiter.SetParentID(s.info.ID.String())
if len(idFromMetabase) == 0 && !modeDegraded {
- if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
- err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
+ if setErr := s.metaBase.SetShardID(*s.info.ID, s.GetMode()); setErr != nil {
+ err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr))
}
}
return
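
UpdateID reads the shard ID stored in the metabase, falls back to the configured one, pushes it to metrics, write-cache and pilorama, and persists it only when the metabase had none. A simplified sketch of that reconcile-then-persist contract, with strings standing in for the real ID type:

package main

import "fmt"

// syncShardID sketches UpdateID's contract: prefer the ID stored in the
// metabase, fall back to the configured one, and write it back only when
// the metabase had none.
func syncShardID(stored, configured string, persist func(string)) string {
	if stored != "" {
		return stored
	}
	persist(configured)
	return configured
}

func main() {
	id := syncShardID("", "GenNewID123", func(id string) { fmt.Println("persisted", id) })
	fmt.Println("shard id:", id)
}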
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index c0fd65f4b..746177c3a 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -81,12 +82,6 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return InhumeRes{}, err
- }
- defer release()
-
if s.hasWriteCache() {
for i := range prm.target {
_ = s.writeCache.Delete(ctx, prm.target[i])
@@ -114,8 +109,9 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrLockObjectRemoval
}
- s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
- zap.Error(err),
+ s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
s.m.RUnlock()
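
Inhume deliberately purges write-cache copies before touching the metabase and ignores write-cache delete errors: the metabase record is the source of truth. A sketch of that ordering, with callbacks standing in for the real components:

package main

import "fmt"

// inhume mirrors the ordering in Shard.Inhume: purge any write-cache copy
// first (best effort), then record the GC mark in the metabase.
func inhume(addrs []string, wcDelete func(string), metaInhume func([]string) error) error {
	for _, a := range addrs {
		wcDelete(a) // errors are deliberately ignored, as in the diff
	}
	return metaInhume(addrs)
}

func main() {
	err := inhume([]string{"addr1"},
		func(a string) { fmt.Println("wc delete", a) },
		func(a []string) error { fmt.Println("meta inhume", a); return nil })
	fmt.Println(err)
}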
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 1421f0e18..1353d5d94 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -27,7 +27,7 @@ func TestShard_Inhume(t *testing.T) {
func testShardInhume(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index af87981ca..8d09974b8 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -106,15 +107,9 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
return SelectRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return SelectRes{}, err
- }
- defer release()
-
lst, err := s.metaBase.Containers(ctx)
if err != nil {
- return res, fmt.Errorf("list stored containers: %w", err)
+ return res, fmt.Errorf("can't list stored containers: %w", err)
}
filters := objectSDK.NewSearchFilters()
@@ -127,9 +122,10 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
if err != nil {
- s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
+ s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}
@@ -151,15 +147,9 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
return ListContainersRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ListContainersRes{}, err
- }
- defer release()
-
containers, err := s.metaBase.Containers(ctx)
if err != nil {
- return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
+ return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
}
return ListContainersRes{
@@ -185,18 +175,12 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
return ListWithCursorRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ListWithCursorRes{}, err
- }
- defer release()
-
var metaPrm meta.ListPrm
metaPrm.SetCount(prm.count)
metaPrm.SetCursor(prm.cursor)
res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
if err != nil {
- return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
+ return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
}
return ListWithCursorRes{
@@ -220,17 +204,11 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai
return ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
var metaPrm meta.IterateOverContainersPrm
metaPrm.Handler = prm.Handler
- err = s.metaBase.IterateOverContainers(ctx, metaPrm)
+ err := s.metaBase.IterateOverContainers(ctx, metaPrm)
if err != nil {
- return fmt.Errorf("iterate over containers: %w", err)
+ return fmt.Errorf("could not iterate over containers: %w", err)
}
return nil
@@ -251,19 +229,13 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
return ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
var metaPrm meta.IterateOverObjectsInContainerPrm
metaPrm.ContainerID = prm.ContainerID
metaPrm.ObjectType = prm.ObjectType
metaPrm.Handler = prm.Handler
- err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
+ err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
if err != nil {
- return fmt.Errorf("iterate over objects: %w", err)
+ return fmt.Errorf("could not iterate over objects: %w", err)
}
return nil
@@ -281,18 +253,12 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive
return 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
-
var metaPrm meta.CountAliveObjectsInContainerPrm
metaPrm.ObjectType = prm.ObjectType
metaPrm.ContainerID = prm.ContainerID
count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
if err != nil {
- return 0, fmt.Errorf("count alive objects in bucket: %w", err)
+ return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
}
return count, nil
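
ListWithCursor is a paging API: each call returns up to count items plus an opaque cursor, and iteration stops when the cursor is exhausted. A toy sketch of that contract, using an int cursor where the metabase uses an opaque struct:

package main

import "fmt"

// page imitates the ListWithCursor contract: each call returns up to count
// items plus a cursor; a cursor of -1 means the listing is complete.
func page(all []int, cursor, count int) ([]int, int) {
	if cursor >= len(all) {
		return nil, -1
	}
	end := min(cursor+count, len(all))
	next := end
	if next >= len(all) {
		next = -1
	}
	return all[cursor:end], next
}

func main() {
	all := []int{1, 2, 3, 4, 5}
	for cur := 0; cur != -1; {
		var batch []int
		batch, cur = page(all, cur, 2)
		fmt.Println(batch)
	}
}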
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 139b2e316..3414dc76a 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -18,14 +18,14 @@ func TestShard_List(t *testing.T) {
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
testShardList(t, sh)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
shWC := newShard(t, true)
- defer func() { require.NoError(t, shWC.Close(context.Background())) }()
+ defer func() { require.NoError(t, shWC.Close()) }()
testShardList(t, shWC)
})
}
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index 9c392fdac..4a8d89d63 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -38,13 +38,7 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- err = s.metaBase.Lock(ctx, idCnr, locker, locked)
+ err := s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -67,12 +61,6 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return false, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
- defer release()
-
var prm meta.IsLockedPrm
prm.SetAddress(addr)
@@ -84,10 +72,10 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return res.Locked(), nil
}
-// GetLocks return lock id's of the provided object. Not found object is
+// GetLocked returns lock IDs of the provided object. An object that is not found is
// considered not locked. Requires a healthy metabase; returns ErrDegradedMode otherwise.
-func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks",
+func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocked",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", addr.EncodeToString()),
@@ -98,12 +86,5 @@ func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error
if m.NoMetabase() {
return nil, ErrDegradedMode
}
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
-
- return s.metaBase.GetLocks(ctx, addr)
+ return s.metaBase.GetLocked(ctx, addr)
}
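
The lock API above is a thin wrapper over the metabase's lock index: Lock records locker-to-member relations, GetLocked reports the lockers of a member, and an unknown member is simply not locked. A toy in-memory stand-in for that index:

package main

import "fmt"

// lockStore is a toy stand-in for the metabase lock index, queried the way
// Shard.Lock and Shard.GetLocked query the real one.
type lockStore struct{ byMember map[string][]string }

func (s *lockStore) Lock(locker string, members []string) {
	for _, m := range members {
		s.byMember[m] = append(s.byMember[m], locker)
	}
}

// GetLocked returns locker IDs for the member; an unknown member yields an
// empty result, i.e. "not locked".
func (s *lockStore) GetLocked(member string) []string {
	return s.byMember[member]
}

func main() {
	s := &lockStore{byMember: map[string][]string{}}
	s.Lock("lockerA", []string{"obj1", "obj2"})
	fmt.Println(s.GetLocked("obj1"), s.GetLocked("obj3"))
}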
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 3878a65cd..9ce95feb1 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -28,10 +28,9 @@ func TestShard_Lock(t *testing.T) {
var sh *Shard
rootPath := t.TempDir()
- l := logger.NewLoggerWrapper(zap.NewNop())
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
- WithLogger(l),
+ WithLogger(&logger.Logger{Logger: zap.NewNop()}),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -54,8 +53,8 @@ func TestShard_Lock(t *testing.T) {
meta.WithPath(filepath.Join(rootPath, "meta")),
meta.WithEpochState(epochState{}),
),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
}
@@ -63,7 +62,7 @@ func TestShard_Lock(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
@@ -149,7 +148,7 @@ func TestShard_Lock(t *testing.T) {
func TestShard_IsLocked(t *testing.T) {
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 5230dcad0..cec5a12ad 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -201,11 +201,11 @@ func TestCounters(t *testing.T) {
dir := t.TempDir()
sh, mm := shardWithMetrics(t, dir)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
- sh.SetMode(context.Background(), mode.ReadOnly)
+ sh.SetMode(mode.ReadOnly)
require.Equal(t, mode.ReadOnly, mm.mode)
- sh.SetMode(context.Background(), mode.ReadWrite)
+ sh.SetMode(mode.ReadWrite)
require.Equal(t, mode.ReadWrite, mm.mode)
const objNumber = 10
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 901528976..d90a5f4b6 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,8 +1,6 @@
package shard
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -20,21 +18,19 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode")
//
// Returns any error encountered that did not allow
// setting shard mode.
-func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error {
+func (s *Shard) SetMode(m mode.Mode) error {
unlock := s.lockExclusive()
defer unlock()
- return s.setMode(ctx, m)
+ return s.setMode(m)
}
-func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
- s.log.Info(ctx, logs.ShardSettingShardMode,
+func (s *Shard) setMode(m mode.Mode) error {
+ s.log.Info(logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
- components := []interface {
- SetMode(context.Context, mode.Mode) error
- }{
+ components := []interface{ SetMode(mode.Mode) error }{
s.metaBase, s.blobStor,
}
@@ -62,7 +58,7 @@ func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
if !m.Disabled() {
for i := range components {
- if err := components[i].SetMode(ctx, m); err != nil {
+ if err := components[i].SetMode(m); err != nil {
return err
}
}
@@ -71,7 +67,7 @@ func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
s.info.Mode = m
s.metricsWriter.SetMode(s.info.Mode)
- s.log.Info(ctx, logs.ShardShardModeSetSuccessfully,
+ s.log.Info(logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}
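A minimal sketch of the anonymous-interface pattern restored in setMode above: heterogeneous storage components are gathered behind an interface literal so a single loop can switch all of their modes. The dbLike type and the int mode are illustrative stand-ins for the real metabase/blobstor components and mode.Mode, not part of the codebase.

package main

import "fmt"

// dbLike stands in for any component exposing SetMode.
type dbLike struct{ name string }

func (d *dbLike) SetMode(m int) error {
	fmt.Printf("%s -> mode %d\n", d.name, m)
	return nil
}

func main() {
	// Anonymous interface literal, as in the setMode hunk above.
	components := []interface{ SetMode(int) error }{
		&dbLike{name: "metabase"}, &dbLike{name: "blobstor"},
	}
	for _, c := range components {
		if err := c.SetMode(1); err != nil {
			panic(err)
		}
	}
}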
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index f8cb00a31..24cc75154 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -67,12 +67,6 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var res common.PutRes
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return PutRes{}, err
- }
- defer release()
-
// existence checks are not performed here; these checks should be executed
// ahead of `Put` by the storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
@@ -81,13 +75,13 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
}
if err != nil || !tryCache {
if err != nil {
- s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
- zap.Error(err))
+ s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+ zap.String("err", err.Error()))
}
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
- return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
+ return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
}
}
@@ -100,7 +94,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
if err != nil {
// maybe we need to handle this case in a special way
// since the object has been successfully written to BlobStor
- return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
+ return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
}
if res.Inserted {
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 443689104..701268820 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -131,12 +131,6 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
return obj, nil
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return RngRes{}, err
- }
- defer release()
-
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index 06fe9f511..cc73db316 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -79,8 +79,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -95,7 +94,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
}),
},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 20f1f2b6f..0d83caa0c 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -6,13 +6,10 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -21,9 +18,37 @@ import (
var ErrRebuildInProgress = errors.New("shard rebuild in progress")
+type RebuildWorkerLimiter interface {
+ AcquireWorkSlot(ctx context.Context) error
+ ReleaseWorkSlot()
+}
+
+type rebuildLimiter struct {
+ semaphore chan struct{}
+}
+
+func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter {
+ return &rebuildLimiter{
+ semaphore: make(chan struct{}, workersCount),
+ }
+}
+
+func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
+ select {
+ case l.semaphore <- struct{}{}:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (l *rebuildLimiter) ReleaseWorkSlot() {
+ <-l.semaphore
+}
+
type rebuildTask struct {
- concurrencyLimiter common.RebuildLimiter
- fillPercent int
+ limiter RebuildWorkerLimiter
+ fillPercent int
}
type rebuilder struct {
@@ -63,37 +88,36 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D
if !ok {
continue
}
- runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter)
+ runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter)
}
}
}()
}
func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
- fillPercent int, concLimiter common.RebuildLimiter,
+ fillPercent int, limiter RebuildWorkerLimiter,
) {
select {
case <-ctx.Done():
return
default:
}
- log.Info(ctx, logs.BlobstoreRebuildStarted)
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil {
- log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
+ log.Info(logs.BlobstoreRebuildStarted)
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
+ log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
} else {
- log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
+ log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
}
}
-func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int,
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int,
) error {
select {
case <-ctx.Done():
return ctx.Err()
case r.tasks <- rebuildTask{
- concurrencyLimiter: limiter,
- fillPercent: fillPercent,
+ limiter: limiter,
+ fillPercent: fillPercent,
}:
return nil
default:
@@ -101,7 +125,7 @@ func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildL
}
}
-func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
+func (r *rebuilder) Stop(log *logger.Logger) {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -114,7 +138,7 @@ func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
r.wg.Wait()
r.cancel = nil
r.done = nil
- log.Info(ctx, logs.BlobstoreRebuildStopped)
+ log.Info(logs.BlobstoreRebuildStopped)
}
var errMBIsNotAvailable = errors.New("metabase is not available")
@@ -142,7 +166,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres
}
type RebuildPrm struct {
- ConcurrencyLimiter common.ConcurrencyLimiter
+ ConcurrencyLimiter RebuildWorkerLimiter
TargetFillPercent uint32
}
@@ -164,30 +188,5 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
return ErrDegradedMode
}
- limiter := &rebuildLimiter{
- concurrencyLimiter: p.ConcurrencyLimiter,
- rateLimiter: s.opsLimiter,
- }
- return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent))
-}
-
-var _ common.RebuildLimiter = (*rebuildLimiter)(nil)
-
-type rebuildLimiter struct {
- concurrencyLimiter common.ConcurrencyLimiter
- rateLimiter qos.Limiter
-}
-
-func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
- return r.concurrencyLimiter.AcquireWorkSlot(ctx)
-}
-
-func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) {
- release, err := r.rateLimiter.ReadRequest(ctx)
- return common.ReleaseFunc(release), err
-}
-
-func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) {
- release, err := r.rateLimiter.WriteRequest(ctx)
- return common.ReleaseFunc(release), err
+ return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent))
}
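The RebuildWorkerLimiter restored above is a channel-based counting semaphore: the buffered channel holds one token per permitted worker, and acquisition races against ctx.Done() so a cancelled caller does not block forever. A runnable sketch of the same pattern under illustrative names (workLimiter is not part of the codebase):

package main

import (
	"context"
	"fmt"
	"time"
)

// workLimiter mirrors rebuildLimiter: a buffered channel as a counting semaphore.
type workLimiter struct{ sem chan struct{} }

func newWorkLimiter(n uint32) *workLimiter {
	return &workLimiter{sem: make(chan struct{}, n)}
}

func (l *workLimiter) AcquireWorkSlot(ctx context.Context) error {
	select {
	case l.sem <- struct{}{}: // token acquired
		return nil
	case <-ctx.Done(): // caller gave up while waiting
		return ctx.Err()
	}
}

func (l *workLimiter) ReleaseWorkSlot() { <-l.sem }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	l := newWorkLimiter(2) // at most two concurrent workers
	for i := 0; i < 5; i++ {
		if err := l.AcquireWorkSlot(ctx); err != nil {
			fmt.Println("acquire:", err)
			return
		}
		go func(i int) {
			defer l.ReleaseWorkSlot()
			fmt.Println("worker", i, "running")
		}(i)
	}
	time.Sleep(100 * time.Millisecond) // sketch only: let workers finish
}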
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
index d90343265..0025bb45a 100644
--- a/pkg/local_object_storage/shard/refill_test.go
+++ b/pkg/local_object_storage/shard/refill_test.go
@@ -34,7 +34,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)},
})
- defer func() { require.NoError(b, sh.Close(context.Background())) }()
+ defer func() { require.NoError(b, sh.Close()) }()
var putPrm PutPrm
@@ -61,7 +61,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, err)
}
- require.NoError(b, sh.Close(context.Background()))
+ require.NoError(b, sh.Close())
require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path))
require.NoError(b, sh.Open(context.Background()))
@@ -72,5 +72,5 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, sh.Init(context.Background()))
- require.NoError(b, sh.Close(context.Background()))
+ require.NoError(b, sh.Close())
}
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index e563f390b..7dd7189bb 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -59,7 +59,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, sh.Init(context.Background()))
defer func() {
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}()
objects := make([]objAddr, 5)
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index fbc751e26..184ca9b71 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -60,12 +60,6 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
return SelectRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return SelectRes{}, err
- }
- defer release()
-
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
@@ -73,7 +67,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
- return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
+ return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
}
return SelectRes{
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index f21541d9d..413bfd2f7 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -7,7 +7,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -39,8 +38,6 @@ type Shard struct {
rb *rebuilder
- opsLimiter *atomicOpsLimiter
-
gcCancel atomic.Value
setModeRequested atomic.Bool
writecacheSealCancel atomic.Pointer[writecacheSealCanceler]
@@ -98,23 +95,20 @@ type cfg struct {
metricsWriter MetricsWriter
- reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
+ reportErrorFunc func(selfID string, message string, err error)
containerInfo container.InfoProvider
-
- configOpsLimiter qos.Limiter
}
func defaultCfg() *cfg {
return &cfg{
rmBatchSize: 100,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
- reportErrorFunc: func(context.Context, string, string, error) {},
+ reportErrorFunc: func(string, string, error) {},
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
metricsWriter: noopMetrics{},
- configOpsLimiter: qos.NewNoopLimiter(),
}
}
@@ -130,15 +124,14 @@ func New(opts ...Option) *Shard {
mb := meta.New(c.metaOpts...)
s := &Shard{
- cfg: c,
- blobStor: bs,
- metaBase: mb,
- tsSource: c.tsSource,
- opsLimiter: newAtomicOpsLimiter(c.configOpsLimiter),
+ cfg: c,
+ blobStor: bs,
+ metaBase: mb,
+ tsSource: c.tsSource,
}
- reportFunc := func(ctx context.Context, msg string, err error) {
- s.reportErrorFunc(ctx, s.ID().String(), msg, err)
+ reportFunc := func(msg string, err error) {
+ s.reportErrorFunc(s.ID().String(), msg, err)
}
s.blobStor.SetReportErrorFunc(reportFunc)
@@ -148,8 +141,7 @@ func New(opts ...Option) *Shard {
append(c.writeCacheOpts,
writecache.WithReportErrorFunc(reportFunc),
writecache.WithBlobstor(bs),
- writecache.WithMetabase(mb),
- writecache.WithQoSLimiter(s.opsLimiter))...)
+ writecache.WithMetabase(mb))...)
s.writeCache.GetMetrics().SetPath(s.writeCache.DumpInfo().Path)
}
@@ -209,7 +201,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option {
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
- c.gcCfg.log = l.WithTag(logger.TagGC)
+ c.gcCfg.log = l
}
}
@@ -222,7 +214,7 @@ func WithWriteCache(use bool) Option {
// hasWriteCache returns true if the write-cache is enabled for the shard.
func (s *Shard) hasWriteCache() bool {
- return s.useWriteCache
+ return s.cfg.useWriteCache
}
// NeedRefillMetabase returns true if the metabase needs to be refilled.
@@ -325,7 +317,7 @@ func WithGCMetrics(v GCMectrics) Option {
// WithReportErrorFunc returns an option to specify a callback for handling storage-related errors
// in the background workers.
-func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
+func WithReportErrorFunc(f func(selfID string, message string, err error)) Option {
return func(c *cfg) {
c.reportErrorFunc = f
}
@@ -376,22 +368,16 @@ func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
}
}
-func WithLimiter(l qos.Limiter) Option {
- return func(c *cfg) {
- c.configOpsLimiter = l
- }
-}
-
func (s *Shard) fillInfo() {
- s.info.MetaBaseInfo = s.metaBase.DumpInfo()
- s.info.BlobStorInfo = s.blobStor.DumpInfo()
- s.info.Mode = s.GetMode()
+ s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
+ s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
+ s.cfg.info.Mode = s.GetMode()
- if s.useWriteCache {
- s.info.WriteCacheInfo = s.writeCache.DumpInfo()
+ if s.cfg.useWriteCache {
+ s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
}
if s.pilorama != nil {
- s.info.PiloramaInfo = s.pilorama.DumpInfo()
+ s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
}
}
@@ -415,7 +401,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
cc, err := s.metaBase.ObjectCounters()
if err != nil {
- s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
+ s.log.Warn(logs.ShardMetaObjectCounterRead,
zap.Error(err),
)
@@ -428,7 +414,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
cnrList, err := s.metaBase.Containers(ctx)
if err != nil {
- s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
+ s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
return
}
@@ -437,7 +423,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
for i := range cnrList {
size, err := s.metaBase.ContainerSize(cnrList[i])
if err != nil {
- s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
+ s.log.Warn(logs.ShardMetaCantReadContainerSize,
zap.String("cid", cnrList[i].EncodeToString()),
zap.Error(err))
continue
@@ -450,7 +436,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
contCount, err := s.metaBase.ContainerCounters(ctx)
if err != nil {
- s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
+ s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err))
return
}
for contID, count := range contCount.Counts {
@@ -458,57 +444,57 @@ func (s *Shard) updateMetrics(ctx context.Context) {
s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
}
- s.metricsWriter.SetMode(s.info.Mode)
+ s.cfg.metricsWriter.SetMode(s.info.Mode)
}
// incObjectCounter increments both the physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
- s.metricsWriter.IncObjectCounter(physical)
- s.metricsWriter.IncObjectCounter(logical)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+ s.cfg.metricsWriter.IncObjectCounter(physical)
+ s.cfg.metricsWriter.IncObjectCounter(logical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
if isUser {
- s.metricsWriter.IncObjectCounter(user)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
+ s.cfg.metricsWriter.IncObjectCounter(user)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
if v > 0 {
- s.metricsWriter.AddToObjectCounter(typ, -int(v))
+ s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) setObjectCounterBy(typ string, v uint64) {
if v > 0 {
- s.metricsWriter.SetObjectCounter(typ, v)
+ s.cfg.metricsWriter.SetObjectCounter(typ, v)
}
}
func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
for cnrID, count := range byCnr {
if count.Phy > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
+ s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
}
if count.Logic > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
+ s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
}
if count.User > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
+ s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
}
}
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
if size != 0 {
- s.metricsWriter.AddToContainerSize(cnr, size)
+ s.cfg.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
if size != 0 {
- s.metricsWriter.AddToPayloadSize(size)
+ s.cfg.metricsWriter.AddToPayloadSize(size)
}
}
@@ -524,39 +510,3 @@ func (s *Shard) SetEvacuationInProgress(val bool) {
s.info.EvacuationInProgress = val
s.metricsWriter.SetEvacuationInProgress(val)
}
-
-var _ qos.Limiter = &atomicOpsLimiter{}
-
-func newAtomicOpsLimiter(l qos.Limiter) *atomicOpsLimiter {
- result := &atomicOpsLimiter{}
- result.ptr.Store(&qosLimiterHolder{Limiter: l})
- return result
-}
-
-type atomicOpsLimiter struct {
- ptr atomic.Pointer[qosLimiterHolder]
-}
-
-func (a *atomicOpsLimiter) Close() {
- a.ptr.Load().Close()
-}
-
-func (a *atomicOpsLimiter) ReadRequest(ctx context.Context) (qos.ReleaseFunc, error) {
- return a.ptr.Load().ReadRequest(ctx)
-}
-
-func (a *atomicOpsLimiter) SetMetrics(m qos.Metrics) {
- a.ptr.Load().SetMetrics(m)
-}
-
-func (a *atomicOpsLimiter) SetParentID(id string) {
- a.ptr.Load().SetParentID(id)
-}
-
-func (a *atomicOpsLimiter) WriteRequest(ctx context.Context) (qos.ReleaseFunc, error) {
- return a.ptr.Load().WriteRequest(ctx)
-}
-
-type qosLimiterHolder struct {
- qos.Limiter
-}
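The atomicOpsLimiter removed above keeps the active qos.Limiter behind an atomic.Pointer so the limiter can be replaced at runtime while readers proceed lock-free. A self-contained sketch of that hot-swap pattern, with an illustrative greeter type standing in for qos.Limiter:

package main

import (
	"fmt"
	"sync/atomic"
)

type greeter struct{ prefix string }

// holder mirrors atomicOpsLimiter: all access goes through an atomic pointer.
type holder struct{ ptr atomic.Pointer[greeter] }

func (h *holder) Greet(name string) string {
	return h.ptr.Load().prefix + name // readers never lock
}

func main() {
	var h holder
	h.ptr.Store(&greeter{prefix: "hello, "})
	fmt.Println(h.Greet("shard")) // hello, shard

	h.ptr.Store(&greeter{prefix: "hi, "}) // swap the implementation at runtime
	fmt.Println(h.Greet("shard"))         // hi, shard
}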
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index 84be71c4d..73ba2e82b 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -60,8 +60,7 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -90,8 +89,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
WithWriteCache(enableWriteCache),
WithWriteCacheOptions(o.wcOpts),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index b1232707f..de00eabd1 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -52,10 +52,10 @@ func TestWriteCacheObjectLoss(t *testing.T) {
})
}
require.NoError(t, errG.Wait())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
var getPrm GetPrm
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index db361a8bd..26dc8ec1e 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -43,11 +43,6 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeMove(ctx, d, treeID, m)
}
@@ -80,11 +75,6 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
}
@@ -113,46 +103,9 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
}
-// TreeApplyBatch implements the pilorama.Forest interface.
-func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- if s.pilorama == nil {
- return ErrPiloramaDisabled
- }
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return ErrReadOnlyMode
- }
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
- return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m)
-}
-
// TreeGetByPath implements the pilorama.Forest interface.
func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath",
@@ -177,11 +130,6 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
}
@@ -207,11 +155,6 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
if s.info.Mode.NoMetabase() {
return pilorama.Meta{}, 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return pilorama.Meta{}, 0, err
- }
- defer release()
return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
}
@@ -237,16 +180,11 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
}
// TreeSortedByFilename implements the pilorama.Forest interface.
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
+func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
@@ -266,11 +204,6 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID
if s.info.Mode.NoMetabase() {
return nil, last, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, last, err
- }
- defer release()
return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
}
@@ -296,11 +229,6 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return pilorama.Move{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return pilorama.Move{}, err
- }
- defer release()
return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
}
@@ -325,11 +253,6 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeDrop(ctx, cid, treeID)
}
@@ -353,11 +276,6 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeList(ctx, cid)
}
@@ -381,11 +299,6 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
return s.pilorama.TreeHeight(ctx, cid, treeID)
}
@@ -410,11 +323,6 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b
if s.info.Mode.NoMetabase() {
return false, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
- defer release()
return s.pilorama.TreeExists(ctx, cid, treeID)
}
@@ -443,11 +351,6 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
}
@@ -472,11 +375,6 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st
if s.info.Mode.NoMetabase() {
return 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
}
@@ -498,11 +396,6 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeListTrees(ctx, prm)
}
@@ -532,10 +425,5 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
}
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index 9edb89df8..a6de07f03 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -67,12 +67,6 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
}
@@ -130,19 +124,12 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
close(started)
defer cleanup()
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
- return
- }
- defer release()
-
- s.log.Info(ctx, logs.StartedWritecacheSealAsync)
+ s.log.Info(logs.StartedWritecacheSealAsync)
if err := s.writeCache.Seal(ctx, prm); err != nil {
- s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
+ s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err))
return
}
- s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
+ s.log.Info(logs.WritecacheSealCompletedAsync)
}()
select {
case <-ctx.Done():
@@ -151,11 +138,5 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
return nil
}
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.writeCache.Seal(ctx, prm)
}
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index fd85b4501..79ab7d9c6 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -43,12 +43,12 @@ func BenchmarkWriteAfterDelete(b *testing.B) {
b.SetParallelism(parallel)
benchmarkRunPar(b, cache, payloadSize)
})
- require.NoError(b, cache.Close(context.Background()))
+ require.NoError(b, cache.Close())
}
func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close(context.Background())) }()
+ defer func() { require.NoError(b, cache.Close()) }()
ctx := context.Background()
objGen := testutil.RandObjGenerator{ObjSize: size}
@@ -71,7 +71,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close(context.Background())) }()
+ defer func() { require.NoError(b, cache.Close()) }()
benchmarkRunPar(b, cache, size)
}
@@ -100,7 +100,7 @@ func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
- require.NoError(b, cache.Init(context.Background()), "initializing")
+ require.NoError(b, cache.Init(), "initializing")
}
type testMetabase struct{}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index ee709ea73..b97fc5856 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -6,7 +6,6 @@ import (
"sync"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -56,13 +55,12 @@ func New(opts ...Option) Cache {
counter: fstree.NewSimpleCounter(),
options: options{
- log: logger.NewLoggerWrapper(zap.NewNop()),
+ log: &logger.Logger{Logger: zap.NewNop()},
maxObjectSize: defaultMaxObjectSize,
workersCount: defaultFlushWorkersCount,
maxCacheSize: defaultMaxCacheSize,
metrics: DefaultMetrics(),
flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
- qosLimiter: qos.NewNoopLimiter(),
},
}
@@ -96,24 +94,23 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error {
if err != nil {
return metaerr.Wrap(err)
}
- c.initCounters()
- return nil
+ return metaerr.Wrap(c.initCounters())
}
// Init runs necessary services.
-func (c *cache) Init(ctx context.Context) error {
+func (c *cache) Init() error {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
- if err := c.flushAndDropBBoltDB(ctx); err != nil {
+ if err := c.flushAndDropBBoltDB(context.Background()); err != nil {
return fmt.Errorf("flush previous version write-cache database: %w", err)
}
- ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache
+ ctx, cancel := context.WithCancel(context.Background())
c.cancel.Store(cancel)
c.runFlushLoop(ctx)
return nil
}
// Close closes the db connection and stops services. Executes ObjectCounters.FlushAndClose op.
-func (c *cache) Close(ctx context.Context) error {
+func (c *cache) Close() error {
if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil {
cancelValue.(context.CancelFunc)()
}
@@ -130,7 +127,7 @@ func (c *cache) Close(ctx context.Context) error {
var err error
if c.fsTree != nil {
- err = c.fsTree.Close(ctx)
+ err = c.fsTree.Close()
if err != nil {
c.fsTree = nil
}
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index 94a0a40db..dda284439 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -46,7 +46,7 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
storageType = StorageTypeFSTree
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
- storagelog.Write(ctx, c.log,
+ storagelog.Write(c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 893d27ba2..bfa6aacb0 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -6,7 +6,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -15,7 +14,6 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -37,7 +35,6 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String())
fl := newFlushLimiter(c.flushSizeLimit)
c.wg.Add(1)
go func() {
@@ -67,13 +64,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
continue
}
- release, err := c.qosLimiter.ReadRequest(ctx)
- if err != nil {
- c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err))
- c.modeMtx.RUnlock()
- continue
- }
- err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
if err := fl.acquire(oi.DataSize); err != nil {
return err
}
@@ -88,15 +79,11 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
return ctx.Err()
}
})
- release()
if err != nil {
- c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+ c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
}
c.modeMtx.RUnlock()
-
- // counter changed by fstree
- c.estimateCacheSize()
case <-ctx.Done():
return
}
@@ -120,18 +107,12 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
defer fl.release(objInfo.size)
- release, err := c.qosLimiter.WriteRequest(ctx)
- if err != nil {
- c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err))
- return
- }
- defer release()
res, err := c.fsTree.Get(ctx, common.GetPrm{
Address: objInfo.addr,
})
if err != nil {
if !client.IsErrObjectNotFound(err) {
- c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
}
return
}
@@ -145,11 +126,11 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
}
-func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
+func (c *cache) reportFlushError(msg string, addr string, err error) {
if c.reportError != nil {
- c.reportError(ctx, msg, err)
+ c.reportError(msg, err)
} else {
- c.log.Error(ctx, msg,
+ c.log.Error(msg,
zap.String("address", addr),
zap.Error(err))
}
@@ -164,7 +145,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var obj objectSDK.Object
err := obj.Unmarshal(e.ObjectData)
if err != nil {
- c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
+ c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
if ignoreErrors {
return nil
}
@@ -202,7 +183,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
- c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor,
+ c.reportFlushError(logs.FSTreeCantFushObjectBlobstor,
addr.EncodeToString(), err)
}
return err
@@ -214,7 +195,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
_, err = c.metabase.UpdateStorageID(ctx, updPrm)
if err != nil {
- c.reportFlushError(ctx, logs.FSTreeCantUpdateID,
+ c.reportFlushError(logs.FSTreeCantUpdateID,
addr.EncodeToString(), err)
}
return err
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 7fc84657c..59a4e4895 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -38,9 +38,9 @@ func TestFlush(t *testing.T) {
errCountOpt := func() (Option, *atomic.Uint32) {
cnt := &atomic.Uint32{}
- return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
+ return WithReportErrorFunc(func(msg string, err error) {
cnt.Add(1)
- testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
}), cnt
}
@@ -114,11 +114,11 @@ func runFlushTest[Option any](
) {
t.Run("no errors", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
require.NoError(t, wc.Flush(context.Background(), false, false))
@@ -127,15 +127,15 @@ func runFlushTest[Option any](
t.Run("flush on moving to degraded mode", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
// Blobstor is read-only, so we expect an error from `flush` here.
- require.Error(t, wc.SetMode(context.Background(), mode.Degraded))
+ require.Error(t, wc.SetMode(mode.Degraded))
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.SetMode(context.Background(), mode.Degraded))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
+ require.NoError(t, wc.SetMode(mode.Degraded))
check(t, mb, bs, objects)
})
@@ -145,12 +145,12 @@ func runFlushTest[Option any](
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
f.InjectFn(t, wc)
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
require.Equal(t, uint32(0), errCount.Load())
require.Error(t, wc.Flush(context.Background(), false, false))
@@ -173,7 +173,7 @@ func newCache[Option any](
meta.WithPath(filepath.Join(dir, "meta")),
meta.WithEpochState(dummyEpoch{}))
require.NoError(t, mb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.Init(context.Background()))
+ require.NoError(t, mb.Init())
bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -184,15 +184,15 @@ func newCache[Option any](
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init(context.Background()))
+ require.NoError(t, bs.Init())
wc := createCacheFn(t, mb, bs, opts...)
require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.Init(context.Background()))
+ require.NoError(t, wc.Init())
// First set mode for metabase and blobstor to prevent background flushes.
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, mb.SetMode(mode.ReadOnly))
+ require.NoError(t, bs.SetMode(mode.ReadOnly))
return wc, bs, mb
}
diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go
index e369fbd50..9ec039f91 100644
--- a/pkg/local_object_storage/writecache/iterate.go
+++ b/pkg/local_object_storage/writecache/iterate.go
@@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
return b.ForEach(func(k, _ []byte) error {
err := addr.DecodeString(string(k))
if err != nil {
- return fmt.Errorf("parse object address: %w", err)
+ return fmt.Errorf("could not parse object address: %w", err)
}
return f(addr)
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
index 0e020b36e..ddc4101be 100644
--- a/pkg/local_object_storage/writecache/limiter.go
+++ b/pkg/local_object_storage/writecache/limiter.go
@@ -3,8 +3,6 @@ package writecache
import (
"errors"
"sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
var errLimiterClosed = errors.New("acquire failed: limiter closed")
@@ -47,11 +45,17 @@ func (l *flushLimiter) release(size uint64) {
l.cond.L.Lock()
defer l.cond.L.Unlock()
- assert.True(l.size >= size, "flushLimiter: invalid size")
- l.size -= size
+ if l.size >= size {
+ l.size -= size
+ } else {
+ panic("flushLimiter: invalid size")
+ }
- assert.True(l.count > 0, "flushLimiter: invalid count")
- l.count--
+ if l.count > 0 {
+ l.count--
+ } else {
+ panic("flushLimiter: invalid count")
+ }
l.cond.Broadcast()
}
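The assert-to-explicit-check rewrite above protects flushLimiter's bookkeeping: release must never drive size or count below zero, so a violated invariant panics instead of silently underflowing an unsigned counter. A minimal sketch of the same defensive accounting (sizeLimiter is illustrative only):

package main

import (
	"fmt"
	"sync"
)

type sizeLimiter struct {
	mu   sync.Mutex
	size uint64
}

func (l *sizeLimiter) acquire(n uint64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.size += n
}

func (l *sizeLimiter) release(n uint64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.size < n {
		panic("sizeLimiter: invalid size") // caller broke the invariant
	}
	l.size -= n
}

func main() {
	var l sizeLimiter
	l.acquire(10)
	l.release(4)
	fmt.Println(l.size) // 6
}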
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index c491be60b..d12dd603b 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -23,8 +23,8 @@ type setModePrm struct {
// SetMode sets write-cache mode of operation.
// When the shard is put in read-only mode, all objects in memory are flushed to disk
// and all background jobs are suspended.
-func (c *cache) SetMode(ctx context.Context, m mode.Mode) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode",
+func (c *cache) SetMode(m mode.Mode) error {
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode",
trace.WithAttributes(
attribute.String("mode", m.String()),
))
@@ -60,7 +60,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
// guarantees that there are no in-flight operations.
for len(c.flushCh) != 0 {
- c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush)
+ c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
time.Sleep(time.Second)
}
@@ -82,8 +82,8 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
return nil
}
if !shrink {
- if err := c.fsTree.Close(ctx); err != nil {
- return fmt.Errorf("close write-cache storage: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
return nil
}
@@ -98,19 +98,19 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
if errors.Is(err, errIterationCompleted) {
empty = false
} else {
- return fmt.Errorf("check write-cache items: %w", err)
+ return fmt.Errorf("failed to check write-cache items: %w", err)
}
}
- if err := c.fsTree.Close(ctx); err != nil {
- return fmt.Errorf("close write-cache storage: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
if empty {
err := os.RemoveAll(c.path)
if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("remove write-cache files: %w", err)
+ return fmt.Errorf("failed to remove write-cache files: %w", err)
}
} else {
- c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
+ c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
}
return nil
}
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
index 4fbadbc64..70cfe8382 100644
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -18,13 +18,13 @@ func TestMode(t *testing.T) {
require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init(context.Background()))
+ require.NoError(t, wc.Init())
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close(context.Background()))
+ require.NoError(t, wc.Close())
require.NoError(t, wc.Open(context.Background(), mode.Degraded))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init(context.Background()))
+ require.NoError(t, wc.Init())
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close(context.Background()))
+ require.NoError(t, wc.Close())
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index a4f98ad06..66ac7805c 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,10 +1,8 @@
package writecache
import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
// Option represents write-cache configuration option.
@@ -31,21 +29,19 @@ type options struct {
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
- reportError func(context.Context, string, error)
+ reportError func(string, error)
// metrics is metrics implementation
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
disableBackgroundFlush bool
// flushSizeLimit is total size of flushing objects.
flushSizeLimit uint64
- // qosLimiter used to limit flush RPS.
- qosLimiter qos.Limiter
}
// WithLogger sets logger.
func WithLogger(log *logger.Logger) Option {
return func(o *options) {
- o.log = log
+ o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
}
}
@@ -112,7 +108,7 @@ func WithNoSync(noSync bool) Option {
}
// WithReportErrorFunc sets the error reporting function.
-func WithReportErrorFunc(f func(context.Context, string, error)) Option {
+func WithReportErrorFunc(f func(string, error)) Option {
return func(o *options) {
o.reportError = f
}
@@ -138,9 +134,3 @@ func WithFlushSizeLimit(v uint64) Option {
o.flushSizeLimit = v
}
}
-
-func WithQoSLimiter(l qos.Limiter) Option {
- return func(o *options) {
- o.qosLimiter = l
- }
-}
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index 2fbf50913..c53067bea 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -2,7 +2,6 @@ package writecache
import (
"context"
- "fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -60,15 +59,7 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
// putBig writes the object to FSTree and pushes it to the flush workers' queue.
func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
- if prm.RawData == nil { // foolproof: RawData should be marshalled by shard.
- data, err := prm.Object.Marshal()
- if err != nil {
- return fmt.Errorf("cannot marshal object: %w", err)
- }
- prm.RawData = data
- }
- size := uint64(len(prm.RawData))
- if !c.hasEnoughSpace(size) {
+ if !c.hasEnoughSpaceFS() {
return ErrOutOfSpace
}
@@ -77,7 +68,7 @@ func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
return err
}
- storagelog.Write(ctx, c.log,
+ storagelog.Write(c.log,
storagelog.AddressField(prm.Address.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree PUT"),
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index 7a52d3672..835686fbb 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -7,6 +7,10 @@ func (c *cache) estimateCacheSize() (uint64, uint64) {
return count, size
}
+func (c *cache) hasEnoughSpaceFS() bool {
+ return c.hasEnoughSpace(c.maxObjectSize)
+}
+
func (c *cache) hasEnoughSpace(objectSize uint64) bool {
count, size := c.estimateCacheSize()
if c.maxCacheCount > 0 && count+1 > c.maxCacheCount {
@@ -15,6 +19,7 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
return c.maxCacheSize >= size+objectSize
}
-func (c *cache) initCounters() {
+func (c *cache) initCounters() error {
c.estimateCacheSize()
+ return nil
}
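hasEnoughSpaceFS above reserves room for one worst-case object (maxObjectSize) instead of the actual payload, trading precision for a check that does not need the marshalled payload size. A runnable sketch of the two-limit admission test behind it, with illustrative numbers:

package main

import "fmt"

// hasEnoughSpace mirrors the check in state.go: admit a new object only if
// both the object-count limit (when set) and the total-size limit hold.
func hasEnoughSpace(count, size, maxCount, maxSize, objectSize uint64) bool {
	if maxCount > 0 && count+1 > maxCount {
		return false
	}
	return maxSize >= size+objectSize
}

func main() {
	fmt.Println(hasEnoughSpace(9, 900, 10, 1000, 50))  // true
	fmt.Println(hasEnoughSpace(10, 900, 10, 1000, 50)) // false: count limit hit
	fmt.Println(hasEnoughSpace(5, 990, 10, 1000, 50))  // false: size limit hit
}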
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index e88566cdf..2e52e5b20 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
fstree.WithFileCounter(c.counter),
)
if err := c.fsTree.Open(mod); err != nil {
- return fmt.Errorf("open FSTree: %w", err)
+ return fmt.Errorf("could not open FSTree: %w", err)
}
if err := c.fsTree.Init(); err != nil {
- return fmt.Errorf("init FSTree: %w", err)
+ return fmt.Errorf("could not init FSTree: %w", err)
}
return nil
@@ -43,9 +43,9 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
if err != nil && !client.IsErrObjectNotFound(err) {
- c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
+ c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
} else if err == nil {
- storagelog.Write(ctx, c.log,
+ storagelog.Write(c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
index 5eb341ba4..3a100f1a3 100644
--- a/pkg/local_object_storage/writecache/upgrade.go
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
return nil
}
if err != nil {
- return fmt.Errorf("check write-cache database existence: %w", err)
+ return fmt.Errorf("could not check write-cache database existence: %w", err)
}
db, err := OpenDB(c.path, true, os.OpenFile)
if err != nil {
- return fmt.Errorf("open write-cache database: %w", err)
+ return fmt.Errorf("could not open write-cache database: %w", err)
}
defer func() {
_ = db.Close()
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index 7ed511318..a973df604 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -38,21 +38,21 @@ type Cache interface {
// Returns ErrReadOnly if the Cache is currently in read-only mode.
Delete(context.Context, oid.Address) error
Put(context.Context, common.PutPrm) (common.PutRes, error)
- SetMode(context.Context, mode.Mode) error
+ SetMode(mode.Mode) error
SetLogger(*logger.Logger)
DumpInfo() Info
Flush(context.Context, bool, bool) error
Seal(context.Context, SealPrm) error
- Init(context.Context) error
+ Init() error
Open(ctx context.Context, mode mode.Mode) error
- Close(context.Context) error
+ Close() error
GetMetrics() Metrics
}
// MainStorage is the interface of the underlying storage of Cache implementations.
type MainStorage interface {
- Compressor() *compression.Compressor
+ Compressor() *compression.Config
Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error)
Put(context.Context, common.PutPrm) (common.PutRes, error)
}
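// Editor's note: a compile-time sketch of the narrowed Cache shape after the
// context parameters are dropped from Init/Close; noopCache is a hypothetical
// stand-in used only to demonstrate interface conformance.
package main

import "fmt"

type Cache interface {
	Init() error
	Close() error
}

type noopCache struct{}

func (noopCache) Init() error  { return nil }
func (noopCache) Close() error { return nil }

// The usual Go idiom asserting conformance at compile time.
var _ Cache = noopCache{}

func main() {
	var c Cache = noopCache{}
	fmt.Println(c.Init(), c.Close()) // <nil> <nil>
}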
diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go
index 4462daab4..aae245acd 100644
--- a/pkg/morph/client/balance/balanceOf.go
+++ b/pkg/morph/client/balance/balanceOf.go
@@ -1,33 +1,36 @@
package balance
import (
- "context"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// BalanceOf receives the amount of funds in the client's account
// through the Balance contract call, and returns it.
-func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) {
- h := id.ScriptHash()
+func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {
+ h, err := address.StringToUint160(id.EncodeToString())
+ if err != nil {
+ return nil, err
+ }
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(balanceOfMethod)
invokePrm.SetArgs(h)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err)
} else if ln := len(prms); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln)
}
amount, err := client.BigIntFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
}
return amount, nil
}
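// Editor's note: a hedged sketch of the conversion used above — a FrostFS
// user.ID encodes to a NEO address string, which neo-go parses back into a
// script hash. To stay self-contained, a zero-valued util.Uint160 is
// round-tripped instead of a real account ID.
package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

func main() {
	var h util.Uint160 // stand-in for an account script hash

	s := address.Uint160ToString(h) // same base58 form as id.EncodeToString()
	back, err := address.StringToUint160(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(s, back.Equals(h)) // prints the NEO address and true
}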
diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go
index f4685b0ab..4befbef45 100644
--- a/pkg/morph/client/balance/burn.go
+++ b/pkg/morph/client/balance/burn.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -32,12 +30,12 @@ func (b *BurnPrm) SetID(id []byte) {
}
// Burn destroys funds from the account.
-func (c *Client) Burn(ctx context.Context, p BurnPrm) error {
+func (c *Client) Burn(p BurnPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(burnMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go
index 1dacb9574..b05c526dc 100644
--- a/pkg/morph/client/balance/client.go
+++ b/pkg/morph/client/balance/client.go
@@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'balance' contract client: %w", err)
+ return nil, fmt.Errorf("could not create static client of Balance contract: %w", err)
}
return &Client{
@@ -54,7 +54,15 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ *o = append(*o, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
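// Editor's note: TryNotary/AsAlphabet above follow Go's functional-options
// pattern; defaultOpts() now starts empty and callers opt in explicitly.
// A generic, runnable sketch of the pattern with hypothetical names:
package main

import "fmt"

type opts []string

type Option func(*opts)

func TryNotary() Option {
	return func(o *opts) { *o = append(*o, "try-notary") }
}

func AsAlphabet() Option {
	return func(o *opts) { *o = append(*o, "as-alphabet") }
}

func newClient(options ...Option) opts {
	o := new(opts) // empty defaults, mirroring defaultOpts() above
	for _, opt := range options {
		opt(o)
	}
	return *o
}

func main() {
	fmt.Println(newClient(TryNotary(), AsAlphabet())) // [try-notary as-alphabet]
}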
diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go
index 57e61d62b..39e4b28e5 100644
--- a/pkg/morph/client/balance/decimals.go
+++ b/pkg/morph/client/balance/decimals.go
@@ -1,7 +1,6 @@
package balance
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,20 +8,20 @@ import (
// Decimals receives the decimal precision of currency transactions
// through the Balance contract call, and returns it.
-func (c *Client) Decimals(ctx context.Context) (uint32, error) {
+func (c *Client) Decimals() (uint32, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(decimalsMethod)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err)
} else if ln := len(prms); ln != 1 {
return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln)
}
decimals, err := client.IntFromStackItem(prms[0])
if err != nil {
- return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
}
return uint32(decimals), nil
}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
index 83e8b0586..a5b206799 100644
--- a/pkg/morph/client/balance/lock.go
+++ b/pkg/morph/client/balance/lock.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -44,12 +42,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
}
// Lock locks fund on the user account.
-func (c *Client) Lock(ctx context.Context, p LockPrm) error {
+func (c *Client) Lock(p LockPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(lockMethod)
prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go
index 082ade85e..73448da31 100644
--- a/pkg/morph/client/balance/mint.go
+++ b/pkg/morph/client/balance/mint.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -32,12 +30,12 @@ func (m *MintPrm) SetID(id []byte) {
}
// Mint sends funds to the account.
-func (c *Client) Mint(ctx context.Context, p MintPrm) error {
+func (c *Client) Mint(p MintPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(mintMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go
index 870bed166..08fb05289 100644
--- a/pkg/morph/client/balance/transfer.go
+++ b/pkg/morph/client/balance/transfer.go
@@ -1,11 +1,11 @@
package balance
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// TransferPrm groups parameters of TransferX method.
@@ -21,18 +21,27 @@ type TransferPrm struct {
// TransferX transfers p.Amount of GASe-12 from p.From to p.To
// with details p.Details through direct smart contract call.
-func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
- from := p.From.ScriptHash()
- to := p.To.ScriptHash()
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) TransferX(p TransferPrm) error {
+ from, err := address.StringToUint160(p.From.EncodeToString())
+ if err != nil {
+ return err
+ }
+
+ to, err := address.StringToUint160(p.To.EncodeToString())
+ if err != nil {
+ return err
+ }
prm := client.InvokePrm{}
prm.SetMethod(transferXMethod)
prm.SetArgs(from, to, p.Amount, p.Details)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err = c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
+ return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
}
return nil
}
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index aab058d27..933f1039f 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -9,7 +9,6 @@ import (
"sync/atomic"
"time"
- nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
@@ -61,9 +60,6 @@ type Client struct {
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
- nnsHash util.Uint160 // NNS contract hash
-
- nnsReader *nnsClient.ContractReader // NNS contract wrapper
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
@@ -98,12 +94,27 @@ type Client struct {
type cache struct {
m sync.RWMutex
+ nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
metrics metrics.MorphCacheMetrics
}
+func (c *cache) nns() *util.Uint160 {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.nnsHash
+}
+
+func (c *cache) setNNSHash(nnsHash util.Uint160) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.nnsHash = &nnsHash
+}
+
func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
defer c.m.RUnlock()
@@ -122,6 +133,7 @@ func (c *cache) invalidate() {
c.m.Lock()
defer c.m.Unlock()
+ c.nnsHash = nil
c.gKey = nil
c.txHeights.Purge()
}
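// Editor's note: a minimal sketch of the RWMutex-guarded lazy cache used for
// the NNS contract hash above — readers take the shared lock, setNNSHash and
// invalidate take the exclusive one. uint64 stands in for util.Uint160.
package main

import (
	"fmt"
	"sync"
)

type hashCache struct {
	m sync.RWMutex
	h *uint64
}

func (c *hashCache) get() *uint64 {
	c.m.RLock()
	defer c.m.RUnlock()
	return c.h
}

func (c *hashCache) set(h uint64) {
	c.m.Lock()
	defer c.m.Unlock()
	c.h = &h
}

func (c *hashCache) invalidate() {
	c.m.Lock()
	defer c.m.Unlock()
	c.h = nil // forces re-resolution on next use, as in invalidate() above
}

func main() {
	var c hashCache
	c.set(42)
	fmt.Println(*c.get()) // 42
	c.invalidate()
	fmt.Println(c.get() == nil) // true
}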
@@ -151,10 +163,24 @@ func (e *notHaltStateError) Error() string {
)
}
+// implementation of error interface for FrostFS-specific errors.
+type frostfsError struct {
+ err error
+}
+
+func (e frostfsError) Error() string {
+ return fmt.Sprintf("frostfs error: %v", e.err)
+}
+
+// wraps FrostFS-specific error into frostfsError. Arg must not be nil.
+func wrapFrostFSError(err error) error {
+ return frostfsError{err}
+}
+
// Invoke invokes contract method by sending transaction into blockchain.
// Returns valid until block value.
// Supported args types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) {
+func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
start := time.Now()
success := false
defer func() {
@@ -165,29 +191,29 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return 0, ErrConnectionLost
}
txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
if err != nil {
- return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
+ return 0, fmt.Errorf("could not invoke %s: %w", method, err)
}
- c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
+ c.logger.Debug(logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
success = true
- return InvokeRes{Hash: txHash, VUB: vub}, nil
+ return vub, nil
}
// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
// If cb returns an error, the session is closed and this error is returned as-is.
-// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
+// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
// The default batchSize is 100, the default limit from neo-go.
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
+func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error {
start := time.Now()
success := false
defer func() {
@@ -214,7 +240,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
if err != nil {
return err
} else if val.State != HaltState {
- return &notHaltStateError{state: val.State, exception: val.FaultException}
+ return wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException})
}
arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
@@ -236,7 +262,10 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
}()
// Batch size for TraverseIterator() can be restricted on the server-side.
- traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
+ traverseBatchSize := batchSize
+ if invoker.DefaultIteratorResultItems < traverseBatchSize {
+ traverseBatchSize = invoker.DefaultIteratorResultItems
+ }
for {
items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
if err != nil {
@@ -278,7 +307,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (
}
if val.State != HaltState {
- return nil, &notHaltStateError{state: val.State, exception: val.FaultException}
+ return nil, wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException})
}
success = true
@@ -299,7 +328,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
- c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
+ c.logger.Debug(logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -333,7 +362,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
- c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
+ c.logger.Debug(logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -360,8 +389,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
- zap.Error(err))
+ c.logger.Error(logs.ClientCantGetBlockchainHeight,
+ zap.String("error", err.Error()))
return nil
}
@@ -374,8 +403,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
- zap.Error(err))
+ c.logger.Error(logs.ClientCantGetBlockchainHeight243,
+ zap.String("error", err.Error()))
return nil
}
@@ -470,7 +499,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
// NeoFSAlphabetList returns keys that are stored in NeoFS Alphabet role. Main chain
// stores alphabet node keys of inner ring there, however the sidechain stores both
// alphabet and non-alphabet node keys of inner ring.
-func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) {
+func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -480,7 +509,7 @@ func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err
list, err := c.roleList(noderoles.NeoFSAlphabet)
if err != nil {
- return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
+ return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
}
return list, nil
@@ -494,7 +523,7 @@ func (c *Client) GetDesignateHash() util.Uint160 {
func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
height, err := c.rpcActor.GetBlockCount()
if err != nil {
- return nil, fmt.Errorf("get chain height: %w", err)
+ return nil, fmt.Errorf("can't get chain height: %w", err)
}
return c.rolemgmt.GetDesignatedByRole(r, height)
@@ -565,7 +594,6 @@ func (c *Client) setActor(act *actor.Actor) {
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
- c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
}
func (c *Client) GetActor() *actor.Actor {
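// Editor's note: a runnable sketch of the frostfsError wrapper introduced
// above and how callers can still reach the inner notHaltStateError via
// errors.As. The Unwrap method is this sketch's assumption — the diff itself
// does not define one.
package main

import (
	"errors"
	"fmt"
)

type notHaltStateError struct{ state, exception string }

func (e *notHaltStateError) Error() string {
	return fmt.Sprintf("contract execution finished with state %s; exception: %s", e.state, e.exception)
}

type frostfsError struct{ err error }

func (e frostfsError) Error() string { return fmt.Sprintf("frostfs error: %v", e.err) }

// Unwrap (an assumption of this sketch) lets errors.As see the inner error.
func (e frostfsError) Unwrap() error { return e.err }

func wrapFrostFSError(err error) error { return frostfsError{err} }

func main() {
	err := wrapFrostFSError(&notHaltStateError{state: "FAULT", exception: "assert failed"})

	var nhs *notHaltStateError
	fmt.Println(errors.As(err, &nhs), err) // true frostfs error: ...
}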
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index e4dcd0db7..08d16deb4 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -61,7 +61,7 @@ var ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
func defaultConfig() *cfg {
return &cfg{
dialTimeout: defaultDialTimeout,
- logger: logger.NewLoggerWrapper(zap.L()),
+ logger: &logger.Logger{Logger: zap.L()},
metrics: morphmetrics.NoopRegister{},
waitInterval: defaultWaitInterval,
signer: &transaction.Signer{
@@ -130,10 +130,10 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
for cli.endpoints.curr, endpoint = range cli.endpoints.list {
cli.client, act, err = cli.newCli(ctx, endpoint)
if err != nil {
- cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
+ cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
zap.Error(err), zap.String("endpoint", endpoint.Address))
} else {
- cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint,
+ cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint,
zap.String("endpoint", endpoint.Address))
if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
cli.switchIsActive.Store(true)
@@ -145,11 +145,6 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
if cli.client == nil {
return nil, ErrNoHealthyEndpoint
}
- cs, err := cli.client.GetContractStateByID(nnsContractID)
- if err != nil {
- return nil, fmt.Errorf("resolve nns hash: %w", err)
- }
- cli.nnsHash = cs.Hash
cli.setActor(act)
go cli.closeWaiter(ctx)
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index be684619b..b512a6594 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -27,6 +27,7 @@ const (
getMethod = "get"
listMethod = "list"
containersOfMethod = "containersOf"
+ eaclMethod = "eACL"
deletionInfoMethod = "deletionInfo"
// putNamedMethod is method name for container put with an alias. It is exported to provide custom fee.
@@ -46,9 +47,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
opts[i](o)
}
- sc, err := client.NewStatic(cli, contract, fee, *o...)
+ sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...)
if err != nil {
- return nil, fmt.Errorf("create 'container' contract client: %w", err)
+ return nil, fmt.Errorf("can't create container static client: %w", err)
}
return &Client{client: sc}, nil
@@ -68,10 +69,20 @@ func (c Client) ContractAddress() util.Uint160 {
// parameter of Wrapper.
type Option func(*opts)
-type opts []client.StaticClientOption
+type opts struct {
+ staticOpts []client.StaticClientOption
+}
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ o.staticOpts = append(o.staticOpts, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
@@ -81,6 +92,6 @@ func defaultOpts() *opts {
// Considered to be used by IR nodes only.
func AsAlphabet() Option {
return func(o *opts) {
- *o = append(*o, client.AsAlphabet())
+ o.staticOpts = append(o.staticOpts, client.AsAlphabet())
}
}
diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go
index 60fb8ad7c..c4db0fe6e 100644
--- a/pkg/morph/client/container/containers_of.go
+++ b/pkg/morph/client/container/containers_of.go
@@ -1,9 +1,10 @@
package container
import (
- "context"
"errors"
+ "fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -14,37 +15,28 @@ import (
// to the specified user of FrostFS system. If idUser is nil, returns the list of all containers.
//
// If the remote RPC does not support the neo-go session API, it falls back to the List() method.
-func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) {
- var cidList []cid.ID
- var err error
-
- cb := func(id cid.ID) error {
- cidList = append(cidList, id)
- return nil
- }
- if err = c.IterateContainersOf(ctx, idUser, cb); err != nil {
- return nil, err
- }
- return cidList, nil
-}
-
-// iterateContainers iterates over a list of container identifiers
-// belonging to the specified user of FrostFS system and executes
-// `cb` on each element. If idUser is nil, calls it on the list of all containers.
-func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error {
+func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
var rawID []byte
+
if idUser != nil {
rawID = idUser.WalletBytes()
}
- itemCb := func(item stackitem.Item) error {
- id, err := getCIDfromStackItem(item)
+ var cidList []cid.ID
+ cb := func(item stackitem.Item) error {
+ rawID, err := client.BytesFromStackItem(item)
if err != nil {
- return err
+ return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err)
}
- if err = cb(id); err != nil {
- return err
+
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return fmt.Errorf("decode container ID: %w", err)
}
+
+ cidList = append(cidList, id)
return nil
}
@@ -58,10 +50,13 @@ func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb fu
const batchSize = 512
cnrHash := c.client.ContractAddress()
- err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
- if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
- return c.iterate(ctx, idUser, cb)
+ err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID)
+ if err != nil {
+ if errors.Is(err, unwrap.ErrNoSessionID) {
+ return c.list(idUser)
+ }
+ return nil, err
}
- return err
+ return cidList, nil
}
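// Editor's note: a self-contained sketch of the fallback strategy in
// ContainersOf above — try the session-based iterator first and fall back to
// the plain list() call when the node reports sessions are unsupported.
// errNoSessionID stands in for unwrap.ErrNoSessionID.
package main

import (
	"errors"
	"fmt"
)

var errNoSessionID = errors.New("no session ID was returned")

func iterateWithSession() ([]string, error) { return nil, errNoSessionID }

func plainList() ([]string, error) { return []string{"cnr-1", "cnr-2"}, nil }

func containersOf() ([]string, error) {
	ids, err := iterateWithSession()
	if err != nil {
		if errors.Is(err, errNoSessionID) {
			return plainList() // old RPC node: fall back, as above
		}
		return nil, err
	}
	return ids, nil
}

func main() {
	fmt.Println(containersOf()) // [cnr-1 cnr-2] <nil>
}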
diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go
index 09912efa5..20351b570 100644
--- a/pkg/morph/client/container/delete.go
+++ b/pkg/morph/client/container/delete.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
@@ -13,7 +12,7 @@ import (
// along with signature and session token.
//
// Returns error if container ID is nil.
-func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
+func Delete(c *Client, witness core.RemovalWitness) error {
binCnr := make([]byte, sha256.Size)
witness.ContainerID.Encode(binCnr)
@@ -27,7 +26,7 @@ func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
prm.SetToken(tok.Marshal())
}
- _, err := c.Delete(ctx, prm)
+ _, err := c.Delete(prm)
return err
}
@@ -66,7 +65,9 @@ func (d *DeletePrm) SetKey(key []byte) {
//
// Returns valid until block and any error encountered that caused
// the removal to interrupt.
-func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) Delete(p DeletePrm) (uint32, error) {
if len(p.signature) == 0 && !p.IsControl() {
return 0, errNilArgument
}
@@ -76,9 +77,9 @@ func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
prm.SetArgs(p.cnr, p.signature, p.key, p.token)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(ctx, prm)
+ res, err := c.client.Invoke(prm)
if err != nil {
- return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
+ return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
}
return res.VUB, nil
}
diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go
index 90bcdd7d5..dda6bf98c 100644
--- a/pkg/morph/client/container/deletion_info.go
+++ b/pkg/morph/client/container/deletion_info.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
"strings"
@@ -15,39 +14,39 @@ import (
"github.com/mr-tron/base58"
)
-func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) {
- return DeletionInfo(ctx, (*Client)(x), cnr)
+func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) {
+ return DeletionInfo((*Client)(x), cnr)
}
type deletionInfo interface {
- DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error)
+ DeletionInfo(cid []byte) (*containercore.DelInfo, error)
}
-func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
+func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.DeletionInfo(ctx, binCnr)
+ return c.DeletionInfo(binCnr)
}
-func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) {
+func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(deletionInfoMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err)
}
if len(arr) != 2 {
@@ -56,17 +55,17 @@ func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.D
rawOwner, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err)
}
var owner user.ID
if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil {
- return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not decode container owner id (%s): %w", deletionInfoMethod, err)
}
epoch, err := client.BigIntFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err)
}
return &containercore.DelInfo{
diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go
new file mode 100644
index 000000000..9e604e091
--- /dev/null
+++ b/pkg/morph/client/container/eacl.go
@@ -0,0 +1,95 @@
+package container
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+)
+
+// GetEACL reads the extended ACL table from FrostFS system
+// through Container contract call.
+//
+// Returns apistatus.EACLNotFound if eACL table is missing in the contract.
+func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) {
+ binCnr := make([]byte, sha256.Size)
+ cnr.Encode(binCnr)
+
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(eaclMethod)
+ prm.SetArgs(binCnr)
+
+ prms, err := c.client.TestInvoke(prm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err)
+ } else if ln := len(prms); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln)
+ }
+
+ arr, err := client.ArrayFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err)
+ }
+
+ if len(arr) != 4 {
+ return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr))
+ }
+
+ rawEACL, err := client.BytesFromStackItem(arr[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err)
+ }
+
+ sig, err := client.BytesFromStackItem(arr[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err)
+ }
+
+ // Client may not return errors if the table is missing, so check this case additionally.
+ // The absence of a signature in the response can be taken as an eACL absence criterion,
+ // since unsigned table cannot be approved in the storage by design.
+ if len(sig) == 0 {
+ return nil, new(apistatus.EACLNotFound)
+ }
+
+ pub, err := client.BytesFromStackItem(arr[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err)
+ }
+
+ binToken, err := client.BytesFromStackItem(arr[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err)
+ }
+
+ var res container.EACL
+
+ res.Value = eacl.NewTable()
+ if err = res.Value.Unmarshal(rawEACL); err != nil {
+ return nil, err
+ }
+
+ if len(binToken) > 0 {
+ res.Session = new(session.Container)
+
+ err = res.Session.Unmarshal(binToken)
+ if err != nil {
+ return nil, fmt.Errorf("could not unmarshal session token: %w", err)
+ }
+ }
+
+ // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
+ var sigV2 refs.Signature
+ sigV2.SetKey(pub)
+ sigV2.SetSign(sig)
+ sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256)
+
+ err = res.Signature.ReadFromV2(sigV2)
+ return &res, err
+}
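// Editor's note: a tiny runnable sketch of the absence criterion GetEACL
// relies on above — an empty signature field means "no eACL stored", since an
// unsigned table can never be approved in storage by design. errEACLNotFound
// stands in for apistatus.EACLNotFound.
package main

import (
	"errors"
	"fmt"
)

var errEACLNotFound = errors.New("eACL not found")

func eaclFromFields(rawEACL, sig []byte) ([]byte, error) {
	if len(sig) == 0 {
		return nil, errEACLNotFound
	}
	return rawEACL, nil
}

func main() {
	_, err := eaclFromFields([]byte{0x0a}, nil)
	fmt.Println(errors.Is(err, errEACLNotFound)) // true
}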
diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go
index 8622d2cdd..ea57a3a95 100644
--- a/pkg/morph/client/container/get.go
+++ b/pkg/morph/client/container/get.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
"strings"
@@ -17,8 +16,8 @@ import (
type containerSource Client
-func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) {
- return Get(ctx, (*Client)(x), cnr)
+func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) {
+ return Get((*Client)(x), cnr)
}
// AsContainerSource provides container Source interface
@@ -28,15 +27,15 @@ func AsContainerSource(w *Client) containercore.Source {
}
type getContainer interface {
- Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+ Get(cid []byte) (*containercore.Container, error)
}
// Get marshals container ID, and passes it to Wrapper's Get method.
-func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) {
+func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.Get(ctx, binCnr)
+ return c.Get(binCnr)
}
// Get reads the container from FrostFS system by binary identifier
@@ -44,24 +43,24 @@ func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Contai
//
// If an empty slice is returned for the requested identifier,
// storage.ErrNotFound error is returned.
-func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
+func (c *Client) Get(cid []byte) (*containercore.Container, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(getMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err)
}
if len(arr) != 4 {
@@ -70,29 +69,29 @@ func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container,
cnrBytes, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err)
}
sigBytes, err := client.BytesFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err)
}
pub, err := client.BytesFromStackItem(arr[2])
if err != nil {
- return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err)
}
tokBytes, err := client.BytesFromStackItem(arr[3])
if err != nil {
- return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err)
}
var cnr containercore.Container
if err := cnr.Value.Unmarshal(cnrBytes); err != nil {
// use other major version if there any
- return nil, fmt.Errorf("unmarshal container: %w", err)
+ return nil, fmt.Errorf("can't unmarshal container: %w", err)
}
if len(tokBytes) > 0 {
@@ -100,7 +99,7 @@ func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container,
err = cnr.Session.Unmarshal(tokBytes)
if err != nil {
- return nil, fmt.Errorf("unmarshal session token: %w", err)
+ return nil, fmt.Errorf("could not unmarshal session token: %w", err)
}
}
diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go
index fc63d1beb..6fed46c1a 100644
--- a/pkg/morph/client/container/list.go
+++ b/pkg/morph/client/container/list.go
@@ -1,22 +1,20 @@
package container
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
-// iterate iterates through a list of container identifiers belonging
+// list returns a list of container identifiers belonging
// to the specified user of FrostFS system. The list is composed
// through Container contract call.
//
-// Iterates through the identifiers of all FrostFS containers if pointer
+// Returns the identifiers of all FrostFS containers if pointer
// to user identifier is nil.
-func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error {
+func (c *Client) list(idUser *user.ID) ([]cid.ID, error) {
var rawID []byte
if idUser != nil {
@@ -27,43 +25,34 @@ func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) e
prm.SetMethod(listMethod)
prm.SetArgs(rawID)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return fmt.Errorf("test invoke (%s): %w", listMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err)
} else if ln := len(res); ln != 1 {
- return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
}
res, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err)
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err)
}
+ cidList := make([]cid.ID, 0, len(res))
for i := range res {
- id, err := getCIDfromStackItem(res[i])
+ rawID, err := client.BytesFromStackItem(res[i])
if err != nil {
- return err
+ return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err)
}
- if err = cb(id); err != nil {
- return err
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
}
+
+ cidList = append(cidList, id)
}
- return nil
-}
-
-func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) {
- rawID, err := client.BytesFromStackItem(item)
- if err != nil {
- return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err)
- }
-
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
- }
- return id, nil
+ return cidList, nil
}
diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go
index 3bb84eb87..777ae2d4e 100644
--- a/pkg/morph/client/container/put.go
+++ b/pkg/morph/client/container/put.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"fmt"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
@@ -15,7 +14,7 @@ import (
// along with sig.Key() and sig.Sign().
//
// Returns error if container is nil.
-func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) {
+func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
data := cnr.Value.Marshal()
d := container.ReadDomain(cnr.Value)
@@ -36,7 +35,7 @@ func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID,
prm.SetKey(sigV2.GetKey())
prm.SetSignature(sigV2.GetSign())
- err := c.Put(ctx, prm)
+ err := c.Put(prm)
if err != nil {
return nil, err
}
@@ -94,7 +93,9 @@ func (p *PutPrm) SetZone(zone string) {
//
// Returns calculated container identifier and any error
// encountered that caused the saving to interrupt.
-func (c *Client) Put(ctx context.Context, p PutPrm) error {
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) Put(p PutPrm) error {
if len(p.sig) == 0 || len(p.key) == 0 {
return errNilArgument
}
@@ -115,9 +116,9 @@ func (c *Client) Put(ctx context.Context, p PutPrm) error {
prm.SetMethod(method)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", method, err)
+ return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
return nil
}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
index d3eba7639..016b56f8f 100644
--- a/pkg/morph/client/frostfs/cheque.go
+++ b/pkg/morph/client/frostfs/cheque.go
@@ -1,8 +1,6 @@
package frostfscontract
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -39,13 +37,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) {
}
// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
+func (x *Client) Cheque(p ChequePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(chequeMethod)
prm.SetArgs(p.id, p.user, p.amount, p.lock)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(ctx, prm)
+ _, err := x.client.Invoke(prm)
return err
}
@@ -68,12 +66,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
}
// AlphabetUpdate update list of alphabet nodes.
-func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error {
+func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(alphabetUpdateMethod)
prm.SetArgs(p.id, p.pubs)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(ctx, prm)
+ _, err := x.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go
index cd6a9849e..571915c27 100644
--- a/pkg/morph/client/frostfs/client.go
+++ b/pkg/morph/client/frostfs/client.go
@@ -35,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'frostfs' contract client: %w", err)
+ return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go
index 61eb03f09..4c31f42de 100644
--- a/pkg/morph/client/frostfsid/client.go
+++ b/pkg/morph/client/frostfsid/client.go
@@ -27,7 +27,7 @@ var _ frostfsidcore.SubjectProvider = (*Client)(nil)
func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) {
sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet())
if err != nil {
- return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err)
+ return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go
index 3a789672a..0852f536c 100644
--- a/pkg/morph/client/frostfsid/subject.go
+++ b/pkg/morph/client/frostfsid/subject.go
@@ -1,7 +1,6 @@
package frostfsid
import (
- "context"
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
@@ -15,14 +14,14 @@ const (
methodGetSubjectExtended = "getSubjectExtended"
)
-func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
+func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubject)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubject, err)
}
structArr, err := checkStackItem(res)
@@ -32,20 +31,20 @@ func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidc
subj, err := frostfsidclient.ParseSubject(structArr)
if err != nil {
- return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
}
-func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
+func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubjectExtended)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubjectExtended, err)
}
structArr, err := checkStackItem(res)
@@ -55,7 +54,7 @@ func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*fr
subj, err := frostfsidclient.ParseSubjectExtended(structArr)
if err != nil {
- return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
@@ -68,7 +67,7 @@ func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error
structArr, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", methodGetSubject, err)
}
return
}
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index b9e39c25e..10ed21582 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -2,7 +2,6 @@ package client
import (
"context"
- "slices"
"sort"
"time"
@@ -43,7 +42,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
newEndpoint := c.endpoints.list[c.endpoints.curr]
cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
- c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
+ c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
zap.String("endpoint", newEndpoint.Address),
zap.Error(err),
)
@@ -53,7 +52,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
c.cache.invalidate()
- c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
+ c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
zap.String("endpoint", newEndpoint.Address))
c.client = cli
@@ -100,7 +99,8 @@ mainLoop:
case <-t.C:
c.switchLock.RLock()
- endpointsCopy := slices.Clone(c.endpoints.list)
+ endpointsCopy := make([]Endpoint, len(c.endpoints.list))
+ copy(endpointsCopy, c.endpoints.list)
currPriority := c.endpoints.list[c.endpoints.curr].Priority
highestPriority := c.endpoints.list[0].Priority
@@ -122,7 +122,7 @@ mainLoop:
cli, act, err := c.newCli(ctx, e)
if err != nil {
- c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
+ c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
@@ -147,7 +147,7 @@ mainLoop:
c.switchLock.Unlock()
- c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC,
+ c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC,
zap.String("endpoint", tryE))
return
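// Editor's note: the hunk above replaces slices.Clone (Go 1.21+) with the
// classic make+copy equivalent; both yield an independent shallow copy that
// remains safe to use after the lock is released.
package main

import "fmt"

type Endpoint struct {
	Address  string
	Priority int
}

func main() {
	list := []Endpoint{{"a:30333", 1}, {"b:30333", 2}}

	endpointsCopy := make([]Endpoint, len(list))
	copy(endpointsCopy, list)

	endpointsCopy[0].Address = "c:30333"
	fmt.Println(list[0].Address, endpointsCopy[0].Address) // a:30333 c:30333
}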
diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go
index de8afbfb5..eafa097e9 100644
--- a/pkg/morph/client/netmap/client.go
+++ b/pkg/morph/client/netmap/client.go
@@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'netmap' contract client: %w", err)
+ return nil, fmt.Errorf("can't create netmap static client: %w", err)
}
return &Client{client: sc}, nil
@@ -65,7 +65,15 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ *o = append(*o, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 3f6aed506..2d19a8193 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -1,7 +1,7 @@
package netmap
import (
- "context"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -24,45 +24,75 @@ const (
// MaxObjectSize receives max object size configuration
// value through the Netmap contract call.
-func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, MaxObjectSizeConfig)
+func (c *Client) MaxObjectSize() (uint64, error) {
+ objectSize, err := c.readUInt64Config(MaxObjectSizeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err)
+ }
+
+ return objectSize, nil
}
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
-func (c *Client) EpochDuration(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, EpochDurationConfig)
+func (c *Client) EpochDuration() (uint64, error) {
+ epochDuration, err := c.readUInt64Config(EpochDurationConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err)
+ }
+
+ return epochDuration, nil
}
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
-func (c *Client) ContainerFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, ContainerFeeConfig)
+func (c *Client) ContainerFee() (uint64, error) {
+ fee, err := c.readUInt64Config(ContainerFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err)
+ }
+
+ return fee, nil
}
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
-func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, ContainerAliasFeeConfig)
+func (c *Client) ContainerAliasFee() (uint64, error) {
+ fee, err := c.readUInt64Config(ContainerAliasFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err)
+ }
+
+ return fee, nil
}
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
// settings.
//
// Returns (false, nil) if config key is not found in the contract.
-func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) {
- return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey)
+func (c *Client) HomomorphicHashDisabled() (bool, error) {
+ return c.readBoolConfig(HomomorphicHashingDisabledKey)
}
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
-func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, IrCandidateFeeConfig)
+func (c *Client) InnerRingCandidateFee() (uint64, error) {
+ fee, err := c.readUInt64Config(IrCandidateFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err)
+ }
+
+ return fee, nil
}
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
-func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, WithdrawFeeConfig)
+func (c *Client) WithdrawFee() (uint64, error) {
+ fee, err := c.readUInt64Config(WithdrawFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err)
+ }
+
+ return fee, nil
}
// MaintenanceModeAllowed reads admission of "maintenance" state from the
@@ -70,32 +100,34 @@ func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
// that storage nodes are allowed to switch their state to "maintenance".
//
// By default, maintenance state is disallowed.
-func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) {
- return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig)
+func (c *Client) MaintenanceModeAllowed() (bool, error) {
+ return c.readBoolConfig(MaintenanceModeAllowedConfig)
}
-func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) {
- v, err := c.config(ctx, []byte(key))
- if err != nil {
- return 0, fmt.Errorf("read netconfig value '%s': %w", key, err)
- }
-
- bi, err := v.TryInteger()
+func (c *Client) readUInt64Config(key string) (uint64, error) {
+ v, err := c.config([]byte(key), IntegerAssert)
if err != nil {
return 0, err
}
- return bi.Uint64(), nil
+
+ // IntegerAssert is guaranteed to return int64 if the error is nil.
+ return uint64(v.(int64)), nil
}
// reads boolean value by the given key from the FrostFS network configuration
// stored in the Sidechain. Returns false if the key is not present.
-func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) {
- v, err := c.config(ctx, []byte(key))
+func (c *Client) readBoolConfig(key string) (bool, error) {
+ v, err := c.config([]byte(key), BoolAssert)
if err != nil {
- return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
+ if errors.Is(err, ErrConfigNotFound) {
+ return false, nil
+ }
+
+ return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err)
}
- return v.TryBool()
+ // BoolAssert is guaranteed to return bool if the error is nil.
+ return v.(bool), nil
}
// SetConfigPrm groups parameters of SetConfig operation.
@@ -123,13 +155,13 @@ func (s *SetConfigPrm) SetValue(value any) {
}
// SetConfig sets config field.
-func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error {
+func (c *Client) SetConfig(p SetConfigPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(setConfigMethod)
prm.SetArgs(p.id, p.key, p.value)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
@@ -166,14 +198,14 @@ type NetworkConfiguration struct {
}
// ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain.
-func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) {
+func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
var res NetworkConfiguration
prm := client.TestInvokePrm{}
prm.SetMethod(configListMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return res, fmt.Errorf("test invoke (%s): %w",
+ return res, fmt.Errorf("could not perform test invocation (%s): %w",
configListMethod, err)
}
@@ -244,18 +276,22 @@ func bytesToBool(val []byte) bool {
return false
}
+// ErrConfigNotFound is returned when the requested key was not found
+// in the network config (returned value is `Null`).
+var ErrConfigNotFound = errors.New("config value not found")
+
// config performs the test invoke of get config value
// method of FrostFS Netmap contract.
//
// Returns ErrConfigNotFound if config key is not found in the contract.
-func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) {
+func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(configMethod)
prm.SetArgs(key)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
configMethod, err)
}
@@ -264,7 +300,26 @@ func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error)
configMethod, ln)
}
- return items[0], nil
+ if _, ok := items[0].(stackitem.Null); ok {
+ return nil, ErrConfigNotFound
+ }
+
+ return assert(items[0])
+}
+
+// IntegerAssert converts stack item to int64.
+func IntegerAssert(item stackitem.Item) (any, error) {
+ return client.IntFromStackItem(item)
+}
+
+// StringAssert converts stack item to string.
+func StringAssert(item stackitem.Item) (any, error) {
+ return client.StringFromStackItem(item)
+}
+
+// BoolAssert converts stack item to bool.
+func BoolAssert(item stackitem.Item) (any, error) {
+ return client.BoolFromStackItem(item)
}
// iterateRecords iterates over all config records and passes them to f.
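// Editor's note: a runnable sketch of the "typed read via assert callback"
// pattern above — config() fetches one raw value and a caller-chosen assert
// converts it, with ErrConfigNotFound signalling a Null result. A plain map
// stands in for the contract's stack items.
package main

import (
	"errors"
	"fmt"
)

var ErrConfigNotFound = errors.New("config value not found")

var store = map[string]any{"MaxObjectSize": int64(64 << 20)}

func config(key string, assert func(any) (any, error)) (any, error) {
	v, ok := store[key]
	if !ok {
		return nil, ErrConfigNotFound // the contract returned Null
	}
	return assert(v)
}

// IntegerAssert mirrors the helper above: it must yield int64 on success.
func IntegerAssert(item any) (any, error) {
	n, ok := item.(int64)
	if !ok {
		return nil, fmt.Errorf("not an integer: %T", item)
	}
	return n, nil
}

func readUInt64Config(key string) (uint64, error) {
	v, err := config(key, IntegerAssert)
	if err != nil {
		return 0, err
	}
	return uint64(v.(int64)), nil // safe: IntegerAssert returned int64
}

func main() {
	fmt.Println(readUInt64Config("MaxObjectSize")) // 67108864 <nil>
}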
diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go
index 8561329ec..92d569ae2 100644
--- a/pkg/morph/client/netmap/epoch.go
+++ b/pkg/morph/client/netmap/epoch.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,13 +8,13 @@ import (
// Epoch receives number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) Epoch(ctx context.Context) (uint64, error) {
+func (c *Client) Epoch() (uint64, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(epochMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w",
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w",
epochMethod, err)
}
@@ -26,20 +25,20 @@ func (c *Client) Epoch(ctx context.Context) (uint64, error) {
num, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err)
+ return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err)
}
return uint64(num), nil
}
// LastEpochBlock receives block number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
+func (c *Client) LastEpochBlock() (uint32, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(lastEpochBlockMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w",
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w",
lastEpochBlockMethod, err)
}
@@ -50,7 +49,7 @@ func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
block, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("get number from stack item (%s): %w",
+ return 0, fmt.Errorf("could not get number from stack item (%s): %w",
lastEpochBlockMethod, err)
}
return uint32(block), nil
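With `context.Context` dropped from the read path, call sites shrink accordingly. A hedged sketch of a caller (helper name and client wiring are assumptions, not part of this diff):

```go
package example

import (
	"fmt"

	netmap "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
)

// epochInfo reads the current epoch and the block it started at.
func epochInfo(cli *netmap.Client) (uint64, uint32, error) {
	epoch, err := cli.Epoch() // no ctx argument after this change
	if err != nil {
		return 0, 0, fmt.Errorf("read epoch: %w", err)
	}
	block, err := cli.LastEpochBlock()
	if err != nil {
		return 0, 0, fmt.Errorf("read epoch block: %w", err)
	}
	return epoch, block, nil
}
```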
diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go
index 0e1f9186b..d6f8c56b2 100644
--- a/pkg/morph/client/netmap/innerring.go
+++ b/pkg/morph/client/netmap/innerring.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"crypto/elliptic"
"fmt"
@@ -24,7 +23,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) {
}
// UpdateInnerRing updates inner ring keys.
-func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
+func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
args := make([][]byte, len(p.keys))
for i := range args {
args[i] = p.keys[i].Bytes()
@@ -35,18 +34,18 @@ func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
prm.SetArgs(args)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
// GetInnerRingList returns the current IR list.
-func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) {
+func (c *Client) GetInnerRingList() (keys.PublicKeys, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(innerRingListMethod)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err)
}
return irKeysFromStackItem(prms, innerRingListMethod)
@@ -59,7 +58,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys
irs, err := client.ArrayFromStackItem(stack[0])
if err != nil {
- return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err)
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
}
irKeys := make(keys.PublicKeys, len(irs))
@@ -79,7 +78,7 @@ const irNodeFixedPrmNumber = 1
func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
prms, err := client.ArrayFromStackItem(prm)
if err != nil {
- return nil, fmt.Errorf("get stack item array (IRNode): %w", err)
+ return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err)
} else if ln := len(prms); ln != irNodeFixedPrmNumber {
return nil, fmt.Errorf(
"unexpected stack item count (IRNode): expected %d, has %d",
@@ -90,7 +89,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
byteKey, err := client.BytesFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err)
+ return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err)
}
return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256())
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index 97782fc25..f7b5c3ba4 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
@@ -12,14 +11,14 @@ import (
// GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and
// decodes netmap.NetMap from the response.
-func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(epochSnapshotMethod)
invokePrm.SetArgs(epoch)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
epochSnapshotMethod, err)
}
@@ -35,13 +34,13 @@ func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.Ne
// GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo
// from the response.
-func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
+func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapCandidatesMethod)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err)
}
if len(res) > 0 {
@@ -52,13 +51,13 @@ func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
}
// NetMap calls the "netmap" method and decodes netmap.NetMap from the response.
-func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) {
+func (c *Client) NetMap() (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapMethod)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
netMapMethod, err)
}
diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go
index 341b20935..ded386c86 100644
--- a/pkg/morph/client/netmap/new_epoch.go
+++ b/pkg/morph/client/netmap/new_epoch.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,14 +8,14 @@ import (
// NewEpoch updates the FrostFS epoch number through a
// Netmap contract call.
-func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
+func (c *Client) NewEpoch(epoch uint64) error {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
+ return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
}
return nil
}
@@ -25,16 +24,16 @@ func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
// control notary transaction internally to ensure all
// nodes produce the same transaction with high probability.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) {
+func (c *Client) NewEpochControl(epoch uint64, vub uint32) (uint32, error) {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
prm.SetControlTX(true)
prm.SetVUB(vub)
- res, err := c.client.Invoke(ctx, prm)
+ res, err := c.client.Invoke(prm)
if err != nil {
- return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
+ return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
}
return res.VUB, nil
}
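`NewEpochControl` marks the request as a control notary transaction so that all alphabet nodes assemble an identical transaction, and it echoes the effective `ValidUntilBlock` back to the caller. A hypothetical wrapper (same imports as the previous sketch) illustrating why that return value matters:

```go
// forceNewEpoch is illustrative: reusing the returned VUB on a retry keeps
// every inner ring node waiting on the same deadline.
func forceNewEpoch(c *netmap.Client, epoch uint64, vub uint32) (uint32, error) {
	newVUB, err := c.NewEpochControl(epoch, vub)
	if err != nil {
		return 0, fmt.Errorf("set epoch %d: %w", epoch, err)
	}
	return newVUB, nil
}
```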
diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go
index e83acde39..764bbc899 100644
--- a/pkg/morph/client/netmap/peer.go
+++ b/pkg/morph/client/netmap/peer.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"errors"
"fmt"
@@ -25,7 +24,7 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
// AddPeer registers peer in FrostFS network through
// Netmap contract call.
-func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
+func (c *Client) AddPeer(p AddPeerPrm) error {
method := addPeerMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -40,15 +39,15 @@ func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
prm.SetArgs(p.nodeInfo.Marshal())
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(ctx, prm); err != nil {
- return fmt.Errorf("invoke method (%s): %w", method, err)
+ if _, err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
return nil
}
// ForceRemovePeer marks the given peer as offline via a notary control transaction.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
+func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
if !c.client.WithNotary() {
return 0, errFailedToRemovePeerWithoutNotary
}
@@ -58,9 +57,9 @@ func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo,
prm.SetControlTX(true)
prm.SetVUB(vub)
- res, err := c.UpdatePeerState(ctx, prm)
+ vub, err := c.UpdatePeerState(prm)
if err != nil {
return 0, fmt.Errorf("updating peer state: %v", err)
}
- return res.VUB, nil
+ return vub, nil
}
diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go
index 9dbec1a90..ba2c26af7 100644
--- a/pkg/morph/client/netmap/snapshot.go
+++ b/pkg/morph/client/netmap/snapshot.go
@@ -1,22 +1,19 @@
package netmap
import (
- "context"
- "fmt"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
// GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response.
-func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(snapshotMethod)
prm.SetArgs(diff)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err)
+ return nil, err
}
return DecodeNetMap(res)
diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go
index f9f639c19..7c3a4e8cd 100644
--- a/pkg/morph/client/netmap/update_state.go
+++ b/pkg/morph/client/netmap/update_state.go
@@ -1,7 +1,7 @@
package netmap
import (
- "context"
+ "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -36,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() {
}
// UpdatePeerState changes peer status through Netmap contract call.
-func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) {
+func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
method := updateStateMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -55,5 +55,9 @@ func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.I
prm.SetArgs(int64(p.state), p.key)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(ctx, prm)
+ res, err := c.client.Invoke(prm)
+ if err != nil {
+ return 0, fmt.Errorf("could not invoke smart contract: %w", err)
+ }
+ return res.VUB, nil
}
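`UpdatePeerState` now hands back the bare `ValidUntilBlock` instead of a `client.InvokeRes`, which is why `ForceRemovePeer` above simply reassigns its `vub` parameter from the result. A caller-side sketch of the new shape, assuming the usual `SetKey` setter on `UpdatePeerPrm` and the imports from the earlier sketches:

```go
// setPeerOffline is a hypothetical wrapper; key is the peer's public key bytes.
func setPeerOffline(c *netmap.Client, key []byte) (uint32, error) {
	var prm netmap.UpdatePeerPrm
	prm.SetKey(key)
	vub, err := c.UpdatePeerState(prm) // returns the VUB directly now
	if err != nil {
		return 0, fmt.Errorf("update peer state: %w", err)
	}
	return vub, nil
}
```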
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index bc00eb889..218f7ad8e 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -8,12 +8,14 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const (
@@ -35,8 +37,12 @@ const (
NNSPolicyContractName = "policy.frostfs"
)
-// ErrNNSRecordNotFound means that there is no such record in NNS contract.
-var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
+var (
+ // ErrNNSRecordNotFound means that there is no such record in NNS contract.
+ ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
+
+ errEmptyResultStack = errors.New("returned result stack is empty")
+)
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
@@ -55,36 +61,97 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {
return util.Uint160{}, ErrConnectionLost
}
- sh, err = nnsResolve(c.nnsReader, name)
+ nnsHash, err := c.NNSHash()
+ if err != nil {
+ return util.Uint160{}, err
+ }
+
+ sh, err = nnsResolve(c.client, nnsHash, name)
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
return sh, nil
}
-func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
- available, err := r.IsAvailable(domain)
- if err != nil {
- return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
+// NNSHash returns NNS contract hash.
+func (c *Client) NNSHash() (util.Uint160, error) {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return util.Uint160{}, ErrConnectionLost
}
- if available {
+ success := false
+ startedAt := time.Now()
+
+ defer func() {
+ c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt))
+ }()
+
+ nnsHash := c.cache.nns()
+
+ if nnsHash == nil {
+ cs, err := c.client.GetContractStateByID(nnsContractID)
+ if err != nil {
+ return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
+ }
+
+ c.cache.setNNSHash(cs.Hash)
+ nnsHash = &cs.Hash
+ }
+ success = true
+ return *nnsHash, nil
+}
+
+func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
+ found, err := exists(c, nnsHash, domain)
+ if err != nil {
+ return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err)
+ }
+
+ if !found {
return nil, ErrNNSRecordNotFound
}
- return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
+ result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
+ {
+ Type: smartcontract.StringType,
+ Value: domain,
+ },
+ {
+ Type: smartcontract.IntegerType,
+ Value: big.NewInt(int64(nns.TXT)),
+ },
+ }, nil)
+ if err != nil {
+ return nil, err
+ }
+ if result.State != vmstate.Halt.String() {
+ return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
+ }
+ if len(result.Stack) == 0 {
+ return nil, errEmptyResultStack
+ }
+ return result.Stack[0], nil
}
-func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
- arr, err := nnsResolveItem(r, domain)
+func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
+ res, err := nnsResolveItem(c, nnsHash, domain)
if err != nil {
return util.Uint160{}, err
}
- if len(arr) == 0 {
- return util.Uint160{}, errors.New("NNS record is missing")
+ // Parse the result of resolving NNS record.
+ // It works with multiple formats (corresponding to multiple NNS versions).
+ // If array of hashes is provided, it returns only the first one.
+ if arr, ok := res.Value().([]stackitem.Item); ok {
+ if len(arr) == 0 {
+ return util.Uint160{}, errors.New("NNS record is missing")
+ }
+ res = arr[0]
}
- bs, err := arr[0].TryBytes()
+ bs, err := res.TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
@@ -104,6 +171,33 @@ func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error
return util.Uint160{}, errors.New("no valid hashes are found")
}
+func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
+ result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
+ {
+ Type: smartcontract.StringType,
+ Value: domain,
+ },
+ }, nil)
+ if err != nil {
+ return false, err
+ }
+
+ if len(result.Stack) == 0 {
+ return false, errEmptyResultStack
+ }
+
+ res := result.Stack[0]
+
+ available, err := res.TryBool()
+ if err != nil {
+ return false, fmt.Errorf("malformed response: %w", err)
+ }
+
+ // not available means that it is taken
+ // and, therefore, exists
+ return !available, nil
+}
+
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
@@ -147,12 +241,18 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
return gKey, nil
}
- arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
+ nnsHash, err := c.NNSHash()
if err != nil {
return nil, err
}
- if len(arr) == 0 {
+ item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
+ if err != nil {
+ return nil, err
+ }
+
+ arr, ok := item.Value().([]stackitem.Item)
+ if !ok || len(arr) == 0 {
return nil, errors.New("NNS record is missing")
}
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 448702613..2a500b31b 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -1,7 +1,6 @@
package client
import (
- "context"
"crypto/elliptic"
"encoding/binary"
"errors"
@@ -38,7 +37,8 @@ type (
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
- proxy util.Uint160
+ notary util.Uint160
+ proxy util.Uint160
}
notaryCfg struct {
@@ -57,11 +57,16 @@ const (
defaultNotaryValidTime = 50
defaultNotaryRoundTime = 100
- setDesignateMethod = "designateAsRole"
+ notaryBalanceOfMethod = "balanceOf"
+ notaryExpirationOfMethod = "expirationOf"
+ setDesignateMethod = "designateAsRole"
+ notaryBalanceErrMsg = "can't fetch notary balance"
notaryNotEnabledPanicMsg = "notary support was not enabled on this client"
)
+var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack")
+
func defaultNotaryConfig(c *Client) *notaryCfg {
return &notaryCfg{
txValidTime: defaultNotaryValidTime,
@@ -101,6 +106,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
alphabetSource: cfg.alphabetSource,
+ notary: notary.Hash,
}
c.notary = notaryCfg
@@ -134,7 +140,7 @@ func (c *Client) ProbeNotary() (res bool) {
// use this function.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
+func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -148,17 +154,16 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err)
}
- r := notary.NewReader(c.rpcActor)
- currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash())
+ currentTill, err := c.depositExpirationOf()
if err != nil {
- return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
}
- till := max(int64(bc+delta), int64(currentTill))
- res, _, err := c.depositNotary(ctx, amount, till)
+ till := max(int64(bc+delta), currentTill)
+ res, _, err := c.depositNotary(amount, till)
return res, err
}
@@ -167,7 +172,7 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta
// This allows to avoid ValidAfterDeposit failures.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) {
+func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -180,23 +185,23 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8)
}
// till value refers to a block height and is a uint32 value in neo-go
- return c.depositNotary(ctx, amount, math.MaxUint32)
+ return c.depositNotary(amount, math.MaxUint32)
}
-func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
+func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
- notary.Hash,
+ c.notary.notary,
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {
if !errors.Is(err, neorpc.ErrAlreadyExists) {
- return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err)
+ return util.Uint256{}, 0, fmt.Errorf("can't make notary deposit: %w", err)
}
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade,
+ c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -204,7 +209,7 @@ func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till i
return util.Uint256{}, 0, nil
}
- c.logger.Info(ctx, logs.ClientNotaryDepositInvoke,
+ c.logger.Info(logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -231,10 +236,18 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) {
sh := c.acc.PrivateKey().PublicKey().GetScriptHash()
- r := notary.NewReader(c.rpcActor)
- bigIntDeposit, err := r.BalanceOf(sh)
+ items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh)
if err != nil {
- return 0, fmt.Errorf("get notary deposit: %w", err)
+ return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)
+ }
+
+ if len(items) != 1 {
+ return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems))
+ }
+
+ bigIntDeposit, err := items[0].TryInteger()
+ if err != nil {
+ return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err))
}
return bigIntDeposit.Int64(), nil
@@ -261,7 +274,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) {
// committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error {
+func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -275,11 +288,10 @@ func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm)
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
- ctx,
setDesignateMethod,
nonce,
vub,
@@ -310,7 +322,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) {
// Requires committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error {
+func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -324,11 +336,10 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
- ctx,
setDesignateMethod,
nonce,
vub,
@@ -344,19 +355,19 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet
// Returns valid until block value.
//
// `nonce` and `vub` are used only if notary is enabled.
-func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return 0, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(ctx, contract, fee, method, args...)
+ return c.Invoke(contract, fee, method, args...)
}
- return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...)
+ return c.notaryInvoke(false, true, contract, nonce, vub, method, args...)
}
// NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's
@@ -364,19 +375,19 @@ func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fi
// not expected to be signed by the current node.
//
// Considered to be used by non-IR nodes.
-func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return 0, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(ctx, contract, fee, method, args...)
+ return c.Invoke(contract, fee, method, args...)
}
- return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...)
+ return c.notaryInvoke(false, false, contract, rand.Uint32(), vubP, method, args...)
}
// NotarySignAndInvokeTX signs and sends notary request that was received from
@@ -393,7 +404,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return fmt.Errorf("fetch current alphabet keys: %w", err)
+ return fmt.Errorf("could not fetch current alphabet keys: %w", err)
}
cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList)
@@ -418,7 +429,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return err
}
- c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked,
+ c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked,
zap.String("tx_hash", mainH.StringLE()),
zap.Uint32("valid_until_block", untilActual),
zap.String("fallback_hash", fbH.StringLE()))
@@ -426,13 +437,13 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return nil
}
-func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error {
+func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error {
designate := c.GetDesignateHash()
- _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...)
+ _, err := c.notaryInvoke(true, true, designate, nonce, &vub, method, args...)
return err
}
-func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
start := time.Now()
success := false
defer func() {
@@ -441,27 +452,27 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
until, err := c.getUntilValue(vub)
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee)
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
nAct, err := notary.NewActor(c.client, cosigners, c.acc)
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != vmstate.Halt.String() {
- return &notHaltStateError{state: r.State, exception: r.FaultException}
+ return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
}
t.ValidUntilBlock = until
@@ -471,17 +482,17 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo
}, args...))
if err != nil && !alreadyOnChainError(err) {
- return InvokeRes{}, err
+ return 0, err
}
- c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked,
+ c.logger.Debug(logs.ClientNotaryRequestInvoked,
zap.String("method", method),
zap.Uint32("valid_until_block", untilActual),
zap.String("tx_hash", mainH.StringLE()),
zap.String("fallback_hash", fbH.StringLE()))
success = true
- return InvokeRes{Hash: mainH, VUB: until}, nil
+ return until, nil
}
func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) {
@@ -515,24 +526,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet
if ok {
pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err)
+ return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err)
}
acc = notary.FakeSimpleAccount(pub)
} else {
m, pubsBytes, ok := vm.ParseMultiSigContract(script)
if !ok {
- return nil, errors.New("parse verification script of signer #2: unknown witness type")
+ return nil, errors.New("failed to parse verification script of signer #2: unknown witness type")
}
pubs := make(keys.PublicKeys, len(pubsBytes))
for i := range pubs {
pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err)
+ return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err)
}
}
acc, err = notary.FakeMultisigAccount(m, pubs)
if err != nil {
- return nil, fmt.Errorf("create fake account for signer #2: %w", err)
+ return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err)
}
}
}
@@ -608,7 +619,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey())
err := multisigAccount.ConvertMultisig(m, ir)
if err != nil {
- return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err)
+ // wrap error as FrostFS-specific since the call is not related to any client
+ return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err))
}
} else {
// alphabet multisig redeem script is
@@ -616,7 +628,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
// inner ring multiaddress witness
multisigAccount, err = notary.FakeMultisigAccount(m, ir)
if err != nil {
- return nil, fmt.Errorf("make inner ring multisig wallet: %w", err)
+ // wrap error as FrostFS-specific since the call is not related to any client
+ return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err))
}
}
@@ -626,7 +639,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
func (c *Client) notaryTxValidationLimit() (uint32, error) {
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return 0, fmt.Errorf("get current blockchain height: %w", err)
+ return 0, fmt.Errorf("can't get current blockchain height: %w", err)
}
minTime := bc + c.notary.txValidTime
@@ -635,6 +648,24 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) {
return rounded, nil
}
+func (c *Client) depositExpirationOf() (int64, error) {
+ expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash())
+ if err != nil {
+ return 0, fmt.Errorf("can't invoke method: %w", err)
+ }
+
+ if len(expirationRes) != 1 {
+ return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes))
+ }
+
+ currentTillBig, err := expirationRes[0].TryInteger()
+ if err != nil {
+ return 0, fmt.Errorf("can't parse deposit till value: %w", err)
+ }
+
+ return currentTillBig.Int64(), nil
+}
+
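+
`DepositNotary` never shortens an existing lock: the effective `till` is the maximum of the requested height (`blockCount+delta`) and the expiration fetched by `depositExpirationOf`. The rule in isolation, as a standalone sketch rather than a function from this diff, with worked values in the comments:

```go
// chooseTill mirrors the max rule used by DepositNotary.
func chooseTill(blockCount, delta uint32, currentTill int64) int64 {
	requested := int64(blockCount + delta)
	if currentTill > requested {
		return currentTill // keep the later expiration
	}
	return requested
}

// blockCount=1000, delta=50, currentTill=1200 -> 1200 (existing lock kept)
// blockCount=1000, delta=50, currentTill=900  -> 1050 (lock extended)
```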
// sigCount returns the number of required signatures.
// For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT).
// If committee is true, returns M as N/2+1.
@@ -708,12 +739,12 @@ func alreadyOnChainError(err error) bool {
func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) {
notaryBalance, err := c.GetNotaryDeposit()
if err != nil {
- return 0, fmt.Errorf("get notary balance: %w", err)
+ return 0, fmt.Errorf("could not get notary balance: %w", err)
}
gasBalance, err := c.GasBalance()
if err != nil {
- return 0, fmt.Errorf("get GAS balance: %w", err)
+ return 0, fmt.Errorf("could not get GAS balance: %w", err)
}
if gasBalance == 0 {
@@ -762,12 +793,12 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool)
if hash != nil {
height, err = c.getTransactionHeight(*hash)
if err != nil {
- return 0, 0, fmt.Errorf("get transaction height: %w", err)
+ return 0, 0, fmt.Errorf("could not get transaction height: %w", err)
}
} else {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- return 0, 0, fmt.Errorf("get chain height: %w", err)
+ return 0, 0, fmt.Errorf("could not get chain height: %w", err)
}
}
diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go
index c4eb120d2..dfcf62b83 100644
--- a/pkg/morph/client/static.go
+++ b/pkg/morph/client/static.go
@@ -1,10 +1,8 @@
package client
import (
- "context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -130,8 +128,7 @@ func (i *InvokePrmOptional) SetVUB(v uint32) {
}
type InvokeRes struct {
- Hash util.Uint256
- VUB uint32
+ VUB uint32
}
// Invoke calls Invoke method of Client with static internal script hash and fee.
@@ -143,7 +140,9 @@ type InvokeRes struct {
//
// If fee for the operation executed using specified method is customized, then StaticClient uses it.
// Otherwise, default fee is used.
-func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) {
+func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
+ var res InvokeRes
+ var err error
var vubP *uint32
if s.tryNotary {
if s.alpha {
@@ -160,7 +159,7 @@ func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, err
nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash)
}
if err != nil {
- return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err)
+ return InvokeRes{}, fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err)
}
vubP = &vub
@@ -170,23 +169,25 @@ func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, err
vubP = &prm.vub
}
- return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
+ res.VUB, err = s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
+ return res, err
}
if prm.vub > 0 {
vubP = &prm.vub
}
- return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
+ res.VUB, err = s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
+ return res, err
}
- return s.client.Invoke(
- ctx,
+ res.VUB, err = s.client.Invoke(
s.scScriptHash,
s.fee,
prm.method,
prm.args...,
)
+ return res, err
}
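All three branches above now funnel their result into `InvokeRes`, which after this change carries only the `VUB`. A hypothetical call site (`exampleMethod` is made up; imports as in the earlier client sketch):

```go
func invokeExample(s client.StaticClient) (uint32, error) {
	prm := client.InvokePrm{}
	prm.SetMethod("exampleMethod")
	prm.SetArgs(int64(1))
	res, err := s.Invoke(prm)
	if err != nil {
		return 0, err
	}
	return res.VUB, nil // the main transaction hash is no longer exposed
}
```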
// TestInvokePrm groups parameters of the TestInvoke operation.
@@ -206,9 +207,7 @@ func (ti *TestInvokePrm) SetArgs(args ...any) {
}
// TestInvoke calls TestInvoke method of Client with static internal script hash.
-func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) {
- _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
- defer span.End()
+func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) {
return s.client.TestInvoke(
s.scScriptHash,
prm.method,
diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go
index f7b6705a8..cd55d6bd2 100644
--- a/pkg/morph/client/util.go
+++ b/pkg/morph/client/util.go
@@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
case stackitem.IntegerT:
n, err := param.TryInteger()
if err != nil {
- return nil, fmt.Errorf("parse integer bytes: %w", err)
+ return nil, fmt.Errorf("can't parse integer bytes: %w", err)
}
return n.Bytes(), nil
@@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) {
func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
return func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != HaltState {
- return &notHaltStateError{state: r.State, exception: r.FaultException}
+ return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
}
t.SystemFee += add
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
deleted file mode 100644
index 87fcf84b8..000000000
--- a/pkg/morph/client/waiter.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client
-
-import (
- "context"
- "fmt"
-
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
-)
-
-type waiterClient struct {
- c *Client
-}
-
-func (w *waiterClient) Context() context.Context {
- return context.Background()
-}
-
-func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
- return w.c.GetApplicationLog(hash, trig)
-}
-
-func (w *waiterClient) GetBlockCount() (uint32, error) {
- return w.c.BlockCount()
-}
-
-func (w *waiterClient) GetVersion() (*result.Version, error) {
- return w.c.GetVersion()
-}
-
-// WaitTxHalt waits until transaction with the specified hash persists on the blockchain.
-// It also checks execution result to finish in HALT state.
-func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
- w, err := waiter.NewPollingBased(&waiterClient{c: c})
- if err != nil {
- return fmt.Errorf("create tx waiter: %w", err)
- }
-
- res, err := w.WaitAny(ctx, vub, h)
- if err != nil {
- return fmt.Errorf("wait until tx persists: %w", err)
- }
-
- if res.VMState.HasFlag(vmstate.Halt) {
- return nil
- }
- return &notHaltStateError{state: res.VMState.String(), exception: res.FaultException}
-}
diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go
index 99f80584a..062a2a886 100644
--- a/pkg/morph/event/balance/lock.go
+++ b/pkg/morph/event/balance/lock.go
@@ -3,7 +3,7 @@ package balance
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -47,17 +47,61 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash }
// ParseLock parses a notification into a lock structure.
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) {
- var le balance.LockEvent
- if err := le.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
+ var (
+ ev Lock
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Lock{
- id: le.TxID,
- user: le.From,
- lock: le.To,
- amount: le.Amount.Int64(),
- until: le.Until.Int64(),
- txHash: e.Container,
- }, nil
+ if ln := len(params); ln != 5 {
+ return nil, event.WrongNumberOfParameters(5, ln)
+ }
+
+ // parse id
+ ev.id, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock id: %w", err)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock user value: %w", err)
+ }
+
+ ev.user, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err)
+ }
+
+ // parse lock account
+ lock, err := client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock account value: %w", err)
+ }
+
+ ev.lock, err = util.Uint160DecodeBytesBE(lock)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.amount, err = client.IntFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock amount: %w", err)
+ }
+
+ // parse until deadline
+ ev.until, err = client.IntFromStackItem(params[4])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock deadline: %w", err)
+ }
+
+ ev.txHash = e.Container
+
+ return ev, nil
}
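`ParseLock` now walks the stack array itself, expecting exactly five items in the order id, user, lock account, amount, until. A test-style sketch of feeding it a synthetic notification, mirroring the `createNotifyEventFromItems` helper the tests below rely on (event construction is an assumption about the neo-go `state` types, not part of this diff):

```go
package example

import (
	"math/big"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// sampleLock builds the five-item notification ParseLock expects and parses it.
func sampleLock() (event.Event, error) {
	e := &state.ContainedNotificationEvent{
		NotificationEvent: state.NotificationEvent{
			Item: stackitem.NewArray([]stackitem.Item{
				stackitem.NewByteArray([]byte("lock-id")),           // id
				stackitem.NewByteArray(util.Uint160{0x1}.BytesBE()), // user
				stackitem.NewByteArray(util.Uint160{0x2}.BytesBE()), // lock account
				stackitem.NewBigInteger(big.NewInt(100)),            // amount
				stackitem.NewBigInteger(big.NewInt(777)),            // until
			}),
		},
	}
	return balance.ParseLock(e)
}
```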
diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go
index 87b91aede..9199bcd55 100644
--- a/pkg/morph/event/balance/lock_test.go
+++ b/pkg/morph/event/balance/lock_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -27,7 +28,7 @@ func TestParseLock(t *testing.T) {
}
_, err := ParseLock(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error())
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go
index d28f6d521..a206307f8 100644
--- a/pkg/morph/event/container/delete.go
+++ b/pkg/morph/event/container/delete.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -58,14 +58,28 @@ func (DeleteSuccess) MorphEvent() {}
// ParseDeleteSuccess decodes notification event thrown by Container contract into
// DeleteSuccess and returns it as event.Event.
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- var dse container.DeleteSuccessEvent
- if err := dse.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err)
+ items, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
}
- var cnr cid.ID
- cnr.SetSHA256(dse.ContainerID)
- return DeleteSuccess{
- ID: cnr,
- }, nil
+ const expectedItemNumDeleteSuccess = 1
+
+ if ln := len(items); ln != expectedItemNumDeleteSuccess {
+ return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln)
+ }
+
+ binID, err := client.BytesFromStackItem(items[0])
+ if err != nil {
+ return nil, fmt.Errorf("parse container ID item: %w", err)
+ }
+
+ var res DeleteSuccess
+
+ err = res.ID.Decode(binID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ return res, nil
}
diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go
index 62e7d7277..627c5fcf5 100644
--- a/pkg/morph/event/container/delete_test.go
+++ b/pkg/morph/event/container/delete_test.go
@@ -4,6 +4,7 @@ import (
"crypto/sha256"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -17,7 +18,7 @@ func TestParseDeleteSuccess(t *testing.T) {
}
_, err := ParseDeleteSuccess(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
})
t.Run("wrong container parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go
index b09394ba4..335034bf3 100644
--- a/pkg/morph/event/container/put.go
+++ b/pkg/morph/event/container/put.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -78,14 +78,33 @@ func (PutSuccess) MorphEvent() {}
// ParsePutSuccess decodes notification event thrown by Container contract into
// PutSuccess and returns it as event.Event.
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- var pse container.PutSuccessEvent
- if err := pse.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err)
+ items, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
}
- var cnr cid.ID
- cnr.SetSHA256(pse.ContainerID)
- return PutSuccess{
- ID: cnr,
- }, nil
+ const expectedItemNumPutSuccess = 2
+
+ if ln := len(items); ln != expectedItemNumPutSuccess {
+ return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln)
+ }
+
+ binID, err := client.BytesFromStackItem(items[0])
+ if err != nil {
+ return nil, fmt.Errorf("parse container ID item: %w", err)
+ }
+
+ _, err = client.BytesFromStackItem(items[1])
+ if err != nil {
+ return nil, fmt.Errorf("parse public key item: %w", err)
+ }
+
+ var res PutSuccess
+
+ err = res.ID.Decode(binID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ return res, nil
}
diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go
index dd5c7ea93..3622f9943 100644
--- a/pkg/morph/event/container/put_test.go
+++ b/pkg/morph/event/container/put_test.go
@@ -4,8 +4,8 @@ import (
"crypto/sha256"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) {
}
_, err := ParsePutSuccess(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
})
t.Run("wrong container ID parameter", func(t *testing.T) {
@@ -35,30 +35,18 @@ func TestParsePutSuccess(t *testing.T) {
id.Encode(binID)
t.Run("wrong public key parameter", func(t *testing.T) {
- t.Run("wrong type", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewMap(),
- }))
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewMap(),
+ }))
- require.Error(t, err)
- })
- t.Run("garbage data", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewByteArray([]byte("key")),
- }))
- require.Error(t, err)
- })
+ require.Error(t, err)
})
t.Run("correct behavior", func(t *testing.T) {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(binID),
- stackitem.NewByteArray(pk.PublicKey().Bytes()),
+ stackitem.NewByteArray([]byte("key")),
}))
require.NoError(t, err)
diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go
index cf56464b8..eae2a23f5 100644
--- a/pkg/morph/event/frostfs/cheque.go
+++ b/pkg/morph/event/frostfs/cheque.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,20 +34,53 @@ func (c Cheque) LockAccount() util.Uint160 { return c.LockValue }
// ParseCheque parses a notification into a cheque structure.
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ce frostfs.ChequeEvent
- if err := ce.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err)
- }
+ var (
+ ev Cheque
+ err error
+ )
- lock, err := util.Uint160DecodeBytesBE(ce.LockAccount)
+ params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err)
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Cheque{
- IDValue: ce.Id,
- AmountValue: ce.Amount.Int64(),
- UserValue: ce.User,
- LockValue: lock,
- }, nil
+ if ln := len(params); ln != 4 {
+ return nil, event.WrongNumberOfParameters(4, ln)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque id: %w", err)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque user: %w", err)
+ }
+
+ ev.UserValue, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque amount: %w", err)
+ }
+
+ // parse lock account
+ lock, err := client.BytesFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque lock account: %w", err)
+ }
+
+ ev.LockValue, err = util.Uint160DecodeBytesBE(lock)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go
index d92b7922b..ab177757f 100644
--- a/pkg/morph/event/frostfs/cheque_test.go
+++ b/pkg/morph/event/frostfs/cheque_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -26,7 +27,7 @@ func TestParseCheque(t *testing.T) {
}
_, err := ParseCheque(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go
index 805e80f3c..4c87634c2 100644
--- a/pkg/morph/event/frostfs/config.go
+++ b/pkg/morph/event/frostfs/config.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -36,15 +36,39 @@ func (u Config) Key() []byte { return u.KeyValue }
func (u Config) Value() []byte { return u.ValueValue }
func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) {
- var sce frostfs.SetConfigEvent
- if err := sce.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err)
+ var (
+ ev Config
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Config{
- KeyValue: sce.Key,
- ValueValue: sce.Value,
- IDValue: sce.Id,
- TxHashValue: e.Container,
- }, nil
+ if ln := len(params); ln != 3 {
+ return nil, event.WrongNumberOfParameters(3, ln)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config update id: %w", err)
+ }
+
+ // parse key
+ ev.KeyValue, err = client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config key: %w", err)
+ }
+
+ // parse value
+ ev.ValueValue, err = client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config value: %w", err)
+ }
+
+ ev.TxHashValue = e.Container
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go
index 8acc8c15c..dcd4201e4 100644
--- a/pkg/morph/event/frostfs/config_test.go
+++ b/pkg/morph/event/frostfs/config_test.go
@@ -3,6 +3,7 @@ package frostfs
import (
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -20,7 +21,7 @@ func TestParseConfig(t *testing.T) {
}
_, err := ParseConfig(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
})
t.Run("wrong first parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go
index fcb01577e..d8a3b82f0 100644
--- a/pkg/morph/event/frostfs/deposit.go
+++ b/pkg/morph/event/frostfs/deposit.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,15 +34,50 @@ func (d Deposit) Amount() int64 { return d.AmountValue }
// ParseDeposit parses a notification into a deposit structure.
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) {
- var de frostfs.DepositEvent
- if err := de.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err)
+ var ev Deposit
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Deposit{
- IDValue: de.TxHash[:],
- AmountValue: de.Amount.Int64(),
- FromValue: de.From,
- ToValue: de.Receiver,
- }, nil
+ if ln := len(params); ln != 4 {
+ return nil, event.WrongNumberOfParameters(4, ln)
+ }
+
+ // parse from
+ from, err := client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit sender: %w", err)
+ }
+
+ ev.FromValue, err = util.Uint160DecodeBytesBE(from)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit amount: %w", err)
+ }
+
+ // parse to
+ to, err := client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit receiver: %w", err)
+ }
+
+ ev.ToValue, err = util.Uint160DecodeBytesBE(to)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit id: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go
index 38d3e61f6..f279a7f9c 100644
--- a/pkg/morph/event/frostfs/deposit_test.go
+++ b/pkg/morph/event/frostfs/deposit_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -11,7 +12,7 @@ import (
func TestParseDeposit(t *testing.T) {
var (
- id = util.Uint256{0, 1, 2, 3}
+ id = []byte("Hello World")
from = util.Uint160{0x1, 0x2, 0x3}
to = util.Uint160{0x3, 0x2, 0x1}
@@ -25,7 +26,7 @@ func TestParseDeposit(t *testing.T) {
}
_, err := ParseDeposit(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
})
t.Run("wrong from parameter", func(t *testing.T) {
@@ -71,12 +72,12 @@ func TestParseDeposit(t *testing.T) {
stackitem.NewByteArray(from.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
stackitem.NewByteArray(to.BytesBE()),
- stackitem.NewByteArray(id[:]),
+ stackitem.NewByteArray(id),
}))
require.NoError(t, err)
require.Equal(t, Deposit{
- IDValue: id[:],
+ IDValue: id,
AmountValue: amount,
FromValue: from,
ToValue: to,
diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go
new file mode 100644
index 000000000..62203540f
--- /dev/null
+++ b/pkg/morph/event/frostfs/ir_update.go
@@ -0,0 +1,54 @@
+package frostfs
+
+import (
+ "crypto/elliptic"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+type UpdateInnerRing struct {
+ keys []*keys.PublicKey
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (UpdateInnerRing) MorphEvent() {}
+
+func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys }
+
+func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) {
+ var (
+ ev UpdateInnerRing
+ err error
+ )
+
+ if ln := len(params); ln != 1 {
+ return nil, event.WrongNumberOfParameters(1, ln)
+ }
+
+ // parse keys
+ irKeys, err := client.ArrayFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get updated inner ring keys: %w", err)
+ }
+
+ ev.keys = make([]*keys.PublicKey, 0, len(irKeys))
+ for i := range irKeys {
+ rawKey, err := client.BytesFromStackItem(irKeys[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not get updated inner ring public key: %w", err)
+ }
+
+ key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err)
+ }
+
+ ev.keys = append(ev.keys, key)
+ }
+
+ return ev, nil
+}
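`ParseUpdateInnerRing` decodes each array element as a compressed secp256r1 public key. A quick round trip showing the 33-byte compressed encoding that `keys.NewPublicKeyFromBytes` expects, matching what `PublicKey.Bytes` produces:

```go
package main

import (
	"crypto/elliptic"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
	priv, err := keys.NewPrivateKey()
	if err != nil {
		panic(err)
	}
	raw := priv.PublicKey().Bytes() // 33-byte compressed secp256r1 point
	pub, err := keys.NewPublicKeyFromBytes(raw, elliptic.P256())
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw), pub.Equal(priv.PublicKey())) // 33 true
}
```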
diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go
new file mode 100644
index 000000000..fae87e5f9
--- /dev/null
+++ b/pkg/morph/event/frostfs/ir_update_test.go
@@ -0,0 +1,57 @@
+package frostfs
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func genKey(t *testing.T) *keys.PrivateKey {
+ priv, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ return priv
+}
+
+func TestParseUpdateInnerRing(t *testing.T) {
+ publicKeys := []*keys.PublicKey{
+ genKey(t).PublicKey(),
+ genKey(t).PublicKey(),
+ genKey(t).PublicKey(),
+ }
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseUpdateInnerRing(prms)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ })
+
+ t.Run("wrong first parameter", func(t *testing.T) {
+ _, err := ParseUpdateInnerRing([]stackitem.Item{
+ stackitem.NewMap(),
+ })
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct", func(t *testing.T) {
+ ev, err := ParseUpdateInnerRing([]stackitem.Item{
+ stackitem.NewArray([]stackitem.Item{
+ stackitem.NewByteArray(publicKeys[0].Bytes()),
+ stackitem.NewByteArray(publicKeys[1].Bytes()),
+ stackitem.NewByteArray(publicKeys[2].Bytes()),
+ }),
+ })
+ require.NoError(t, err)
+
+ require.Equal(t, UpdateInnerRing{
+ keys: publicKeys,
+ }, ev)
+ })
+}
diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go
index 2568b6512..f48067f86 100644
--- a/pkg/morph/event/frostfs/withdraw.go
+++ b/pkg/morph/event/frostfs/withdraw.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -30,14 +30,39 @@ func (w Withdraw) Amount() int64 { return w.AmountValue }
// ParseWithdraw notification into withdraw structure.
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) {
- var we frostfs.WithdrawEvent
- if err := we.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err)
+ var ev Withdraw
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Withdraw{
- IDValue: we.TxHash[:],
- AmountValue: we.Amount.Int64(),
- UserValue: we.User,
- }, nil
+ if ln := len(params); ln != 3 {
+ return nil, event.WrongNumberOfParameters(3, ln)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw user: %w", err)
+ }
+
+ ev.UserValue, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw amount: %w", err)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw id: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go
index e382305e6..33435d19a 100644
--- a/pkg/morph/event/frostfs/withdraw_test.go
+++ b/pkg/morph/event/frostfs/withdraw_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -11,7 +12,7 @@ import (
func TestParseWithdraw(t *testing.T) {
var (
- id = util.Uint256{1, 2, 3}
+ id = []byte("Hello World")
user = util.Uint160{0x1, 0x2, 0x3}
amount int64 = 10
@@ -24,7 +25,7 @@ func TestParseWithdraw(t *testing.T) {
}
_, err := ParseWithdraw(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
})
t.Run("wrong user parameter", func(t *testing.T) {
@@ -58,12 +59,12 @@ func TestParseWithdraw(t *testing.T) {
ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(user.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
- stackitem.NewByteArray(id[:]),
+ stackitem.NewByteArray(id),
}))
require.NoError(t, err)
require.Equal(t, Withdraw{
- IDValue: id[:],
+ IDValue: id,
AmountValue: amount,
UserValue: user,
}, ev)
diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go
index 55a514ff1..182b4667e 100644
--- a/pkg/morph/event/handlers.go
+++ b/pkg/morph/event/handlers.go
@@ -1,26 +1,32 @@
package event
import (
- "context"
-
"github.com/nspcc-dev/neo-go/pkg/core/block"
- "github.com/nspcc-dev/neo-go/pkg/util"
)
// Handler is an Event processing function.
-type Handler func(context.Context, Event)
+type Handler func(Event)
// BlockHandler is a chain block processing function.
-type BlockHandler func(context.Context, *block.Block)
+type BlockHandler func(*block.Block)
// NotificationHandlerInfo is a structure that groups
// the parameters of the handler of particular
// contract event.
type NotificationHandlerInfo struct {
- Contract util.Uint160
- Type Type
- Parser NotificationParser
- Handlers []Handler
+ scriptHashWithType
+
+ h Handler
+}
+
+// SetHandler is an event handler setter.
+func (s *NotificationHandlerInfo) SetHandler(v Handler) {
+ s.h = v
+}
+
+// Handler returns an event handler.
+func (s NotificationHandlerInfo) Handler() Handler {
+ return s.h
}
// NotaryHandlerInfo is a structure that groups
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index e5cdfeef7..dd3c7d216 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -33,6 +33,13 @@ type Listener interface {
// it could not be started.
ListenWithError(context.Context, chan<- error)
+ // SetNotificationParser must set the parser of particular contract event.
+ //
+ // Parser of each event must be set once. All parsers must be set before Listen call.
+ //
+ // Must ignore nil parsers and all calls after listener has been started.
+ SetNotificationParser(NotificationParserInfo)
+
// RegisterNotificationHandler must register the event handler for particular notification event of contract.
//
// The specified handler must be called after each capture and parsing of the event.
@@ -93,6 +100,8 @@ type listener struct {
startOnce, stopOnce sync.Once
+ started bool
+
notificationParsers map[scriptHashWithType]NotificationParser
notificationHandlers map[scriptHashWithType][]Handler
@@ -111,7 +120,7 @@ type listener struct {
pool *ants.Pool
}
-const newListenerFailMsg = "instantiate Listener"
+const newListenerFailMsg = "could not instantiate Listener"
var (
errNilLogger = errors.New("nil logger")
@@ -134,8 +143,11 @@ func (l *listener) Listen(ctx context.Context) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
-
- l.listen(ctx, nil)
+ if err := l.listen(ctx, nil); err != nil {
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
+ zap.String("error", err.Error()),
+ )
+ }
})
}
@@ -149,17 +161,26 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
-
- l.listen(ctx, intError)
+ if err := l.listen(ctx, intError); err != nil {
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
+ zap.String("error", err.Error()),
+ )
+ l.sendError(ctx, intError, err)
+ }
})
}
-func (l *listener) listen(ctx context.Context, intError chan<- error) {
+func (l *listener) listen(ctx context.Context, intError chan<- error) error {
+ // mark listener as started
+ l.started = true
+
subErrCh := make(chan error)
go l.subscribe(subErrCh)
l.listenLoop(ctx, intError, subErrCh)
+
+ return nil
}
func (l *listener) subscribe(errCh chan error) {
@@ -171,7 +192,7 @@ func (l *listener) subscribe(errCh chan error) {
// fill the list with the contracts with set event parsers.
l.mtx.RLock()
for hashType := range l.notificationParsers {
- scHash := hashType.Hash
+ scHash := hashType.ScriptHash()
// prevent repetitions
for _, hash := range hashes {
@@ -180,26 +201,26 @@ func (l *listener) subscribe(errCh chan error) {
}
}
- hashes = append(hashes, hashType.Hash)
+ hashes = append(hashes, hashType.ScriptHash())
}
l.mtx.RUnlock()
err := l.subscriber.SubscribeForNotification(hashes...)
if err != nil {
- errCh <- fmt.Errorf("subscribe for notifications: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for notifications: %w", err)
return
}
if len(l.blockHandlers) > 0 {
if err = l.subscriber.BlockNotifications(); err != nil {
- errCh <- fmt.Errorf("subscribe for blocks: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for blocks: %w", err)
return
}
}
if l.listenNotary {
if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
- errCh <- fmt.Errorf("subscribe for notary requests: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err)
return
}
}
@@ -213,7 +234,7 @@ func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error
// in the same routine when shutting down node.
select {
case <-ctx.Done():
- l.log.Info(ctx, logs.EventStopEventListenerByContext,
+ l.log.Info(logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
return false
@@ -230,81 +251,81 @@ loop:
select {
case err := <-subErrCh:
if !l.sendError(ctx, intErr, err) {
- l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
+ l.log.Error(logs.EventStopEventListenerByError, zap.Error(err))
}
break loop
case <-ctx.Done():
- l.log.Info(ctx, logs.EventStopEventListenerByContext,
+ l.log.Info(logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
break loop
case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
+ l.log.Warn(logs.EventStopEventListenerByNotificationChannel)
l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
break loop
} else if notifyEvent == nil {
- l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
+ l.log.Warn(logs.EventNilNotificationEventWasCaught)
continue loop
}
- l.handleNotifyEvent(ctx, notifyEvent)
+ l.handleNotifyEvent(notifyEvent)
case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
+ l.log.Warn(logs.EventStopEventListenerByNotaryChannel)
l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
break loop
} else if notaryEvent == nil {
- l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
+ l.log.Warn(logs.EventNilNotaryEventWasCaught)
continue loop
}
- l.handleNotaryEvent(ctx, notaryEvent)
+ l.handleNotaryEvent(notaryEvent)
case b, ok := <-chs.BlockCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
+ l.log.Warn(logs.EventStopEventListenerByBlockChannel)
l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
break loop
} else if b == nil {
- l.log.Warn(ctx, logs.EventNilBlockWasCaught)
+ l.log.Warn(logs.EventNilBlockWasCaught)
continue loop
}
- l.handleBlockEvent(ctx, b)
+ l.handleBlockEvent(b)
}
}
}
-func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) {
+func (l *listener) handleBlockEvent(b *block.Block) {
if err := l.pool.Submit(func() {
for i := range l.blockHandlers {
- l.blockHandlers[i](ctx, b)
+ l.blockHandlers[i](b)
}
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) {
+func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotary(ctx, notaryEvent)
+ l.parseAndHandleNotary(notaryEvent)
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotification(ctx, notifyEvent)
+ l.parseAndHandleNotification(notifyEvent)
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) {
log := l.log.With(
zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
)
@@ -317,14 +338,16 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
)
// get the event parser
- keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent}
+ keyEvent := scriptHashWithType{}
+ keyEvent.SetScriptHash(notifyEvent.ScriptHash)
+ keyEvent.SetType(typEvent)
l.mtx.RLock()
parser, ok := l.notificationParsers[keyEvent]
l.mtx.RUnlock()
if !ok {
- log.Debug(ctx, logs.EventEventParserNotSet)
+ log.Debug(logs.EventEventParserNotSet)
return
}
@@ -332,8 +355,8 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
// parse the notification event
event, err := parser(notifyEvent)
if err != nil {
- log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotParseNotificationEvent,
+ zap.String("error", err.Error()),
)
return
@@ -345,7 +368,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
l.mtx.RUnlock()
if len(handlers) == 0 {
- log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -353,11 +376,11 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
}
for _, handler := range handlers {
- handler(ctx, event)
+ handler(event)
}
}
-func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) {
+func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
// prepare the notary event
notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest)
if err != nil {
@@ -365,14 +388,14 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
switch {
case errors.Is(err, ErrTXAlreadyHandled):
case errors.As(err, &expErr):
- l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
- zap.Error(err),
+ l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent,
+ zap.String("error", err.Error()),
zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
)
default:
- l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
- zap.Error(err),
+ l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent,
+ zap.String("error", err.Error()),
)
}
@@ -395,7 +418,7 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
l.mtx.RUnlock()
if !ok {
- log.Debug(ctx, logs.EventNotaryParserNotSet)
+ log.Debug(logs.EventNotaryParserNotSet)
return
}
@@ -403,8 +426,8 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
// parse the notary event
event, err := parser(notaryEvent)
if err != nil {
- log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotParseNotaryEvent,
+ zap.String("error", err.Error()),
)
return
@@ -416,14 +439,47 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
l.mtx.RUnlock()
if !ok {
- log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
return
}
- handler(ctx, event)
+ handler(event)
+}
+
+// SetNotificationParser sets the parser of particular contract event.
+//
+// Ignores nil and already set parsers.
+// Ignores the parser if listener is started.
+func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
+ log := l.log.With(
+ zap.String("contract", pi.ScriptHash().StringLE()),
+ zap.Stringer("event_type", pi.getType()),
+ )
+
+ parser := pi.parser()
+ if parser == nil {
+ log.Info(logs.EventIgnoreNilEventParser)
+ return
+ }
+
+ l.mtx.Lock()
+ defer l.mtx.Unlock()
+
+ // check if the listener was started
+ if l.started {
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
+ return
+ }
+
+ // add event parser
+ if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok {
+ l.notificationParsers[pi.scriptHashWithType] = pi.parser()
+ }
+
+ log.Debug(logs.EventRegisteredNewEventParser)
}
// RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -432,23 +488,35 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
// Ignores handlers of event without parser.
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
log := l.log.With(
- zap.String("contract", hi.Contract.StringLE()),
- zap.Stringer("event_type", hi.Type),
+ zap.String("contract", hi.ScriptHash().StringLE()),
+ zap.Stringer("event_type", hi.GetType()),
)
+ handler := hi.Handler()
+ if handler == nil {
+ log.Warn(logs.EventIgnoreNilEventHandler)
+ return
+ }
+
// check if parser was set
+ l.mtx.RLock()
+ _, ok := l.notificationParsers[hi.scriptHashWithType]
+ l.mtx.RUnlock()
+
+ if !ok {
+ log.Warn(logs.EventIgnoreHandlerOfEventWoParser)
+ return
+ }
+
+ // add event handler
l.mtx.Lock()
- defer l.mtx.Unlock()
-
- k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type}
-
- l.notificationParsers[k] = hi.Parser
- l.notificationHandlers[k] = append(
- l.notificationHandlers[k],
- hi.Handlers...,
+ l.notificationHandlers[hi.scriptHashWithType] = append(
+ l.notificationHandlers[hi.scriptHashWithType],
+ hi.Handler(),
)
+ l.mtx.Unlock()
- log.Debug(context.Background(), logs.EventRegisteredNewEventHandler)
+ log.Debug(logs.EventRegisteredNewEventHandler)
}
// EnableNotarySupport enables notary request listening. Passed hash is
@@ -487,15 +555,27 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
zap.Stringer("notary_type", pi.RequestType()),
)
+ parser := pi.parser()
+ if parser == nil {
+ log.Info(logs.EventIgnoreNilNotaryEventParser)
+ return
+ }
+
l.mtx.Lock()
defer l.mtx.Unlock()
+ // check if the listener was started
+ if l.started {
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
+ return
+ }
+
// add event parser
if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok {
l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
}
- log.Info(context.Background(), logs.EventRegisteredNewEventParser)
+ log.Info(logs.EventRegisteredNewEventParser)
}
// RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -513,13 +593,19 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
zap.Stringer("notary type", hi.RequestType()),
)
+ handler := hi.Handler()
+ if handler == nil {
+ log.Warn(logs.EventIgnoreNilNotaryEventHandler)
+ return
+ }
+
// check if parser was set
l.mtx.RLock()
_, ok := l.notaryParsers[hi.notaryRequestTypes]
l.mtx.RUnlock()
if !ok {
- log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser)
+ log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser)
return
}
@@ -528,7 +614,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
l.mtx.Unlock()
- log.Info(context.Background(), logs.EventRegisteredNewEventHandler)
+ log.Info(logs.EventRegisteredNewEventHandler)
}
// Stop closes subscription channel with remote neo node.
@@ -541,6 +627,11 @@ func (l *listener) Stop() {
}
func (l *listener) RegisterBlockHandler(handler BlockHandler) {
+ if handler == nil {
+ l.log.Warn(logs.EventIgnoreNilBlockHandler)
+ return
+ }
+
l.blockHandlers = append(l.blockHandlers, handler)
}
@@ -557,7 +648,7 @@ func NewListener(p ListenerParams) (Listener, error) {
// The default capacity is 0, which means "infinite".
pool, err := ants.NewPool(p.WorkerPoolCapacity)
if err != nil {
- return nil, fmt.Errorf("init worker pool: %w", err)
+ return nil, fmt.Errorf("could not init worker pool: %w", err)
}
return &listener{
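With parsers and handlers split apart, callers now register in two steps: install the parser (before `Listen`), then attach handlers keyed by the same contract hash and event type. A sketch of the wiring under those assumptions, using the exported setters shown in this diff; `lis` is an already-constructed `event.Listener` and the event type string "Deposit" is illustrative.

```go
package wiring

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// wireDeposit shows the two-step registration the reworked listener requires:
// the parser must be set first (and before Listen), then handlers may be added.
func wireDeposit(lis event.Listener, contract util.Uint160) {
	var pi event.NotificationParserInfo
	pi.SetScriptHash(contract)
	pi.SetType(event.TypeFromString("Deposit"))
	pi.SetParser(frostfs.ParseDeposit)
	lis.SetNotificationParser(pi)

	var hi event.NotificationHandlerInfo
	hi.SetScriptHash(contract)
	hi.SetType(event.TypeFromString("Deposit"))
	hi.SetHandler(func(e event.Event) {
		// parsed deposits arrive here; handlers without a parser are ignored
		_ = e.(frostfs.Deposit)
	})
	lis.RegisterNotificationHandler(hi)
}
```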
diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go
index 87f37305f..5f7cf9f43 100644
--- a/pkg/morph/event/listener_test.go
+++ b/pkg/morph/event/listener_test.go
@@ -34,24 +34,34 @@ func TestEventHandling(t *testing.T) {
blockHandled := make(chan bool)
handledBlocks := make([]*block.Block, 0)
- l.RegisterBlockHandler(func(_ context.Context, b *block.Block) {
+ l.RegisterBlockHandler(func(b *block.Block) {
handledBlocks = append(handledBlocks, b)
blockHandled <- true
})
+ key := scriptHashWithType{
+ scriptHashValue: scriptHashValue{
+ hash: util.Uint160{100},
+ },
+ typeValue: typeValue{
+ typ: TypeFromString("notification type"),
+ },
+ }
+
+ l.SetNotificationParser(NotificationParserInfo{
+ scriptHashWithType: key,
+ p: func(cne *state.ContainedNotificationEvent) (Event, error) {
+ return testNotificationEvent{source: cne}, nil
+ },
+ })
+
notificationHandled := make(chan bool)
handledNotifications := make([]Event, 0)
l.RegisterNotificationHandler(NotificationHandlerInfo{
- Contract: util.Uint160{100},
- Type: TypeFromString("notification type"),
- Parser: func(cne *state.ContainedNotificationEvent) (Event, error) {
- return testNotificationEvent{source: cne}, nil
- },
- Handlers: []Handler{
- func(_ context.Context, e Event) {
- handledNotifications = append(handledNotifications, e)
- notificationHandled <- true
- },
+ scriptHashWithType: key,
+ h: func(e Event) {
+ handledNotifications = append(handledNotifications, e)
+ notificationHandled <- true
},
})
@@ -127,7 +137,7 @@ func TestErrorPassing(t *testing.T) {
WorkerPoolCapacity: 10,
})
require.NoError(t, err, "failed to create listener")
- l.RegisterBlockHandler(func(context.Context, *block.Block) {})
+ l.RegisterBlockHandler(func(b *block.Block) {})
errCh := make(chan error)
diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go
index 39c8f6237..e454e2a6a 100644
--- a/pkg/morph/event/netmap/epoch.go
+++ b/pkg/morph/event/netmap/epoch.go
@@ -1,7 +1,9 @@
package netmap
import (
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -35,13 +37,22 @@ func (s NewEpoch) TxHash() util.Uint256 {
//
// Result is type of NewEpoch.
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
- var nee netmap.NewEpochEvent
- if err := nee.FromStackItem(e.Item); err != nil {
- return nil, err
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ if ln := len(params); ln != 1 {
+ return nil, event.WrongNumberOfParameters(1, ln)
+ }
+
+ prmEpochNum, err := client.IntFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get integer epoch number: %w", err)
}
return NewEpoch{
- Num: nee.Epoch.Uint64(),
+ Num: uint64(prmEpochNum),
Hash: e.Container,
}, nil
}
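One behavioral nuance of the manual path: `client.IntFromStackItem` returns an `int64` that is then cast to `uint64`, so a malformed notification carrying a negative epoch would wrap silently rather than fail. A tiny illustration:

```go
package main

import "fmt"

func main() {
	epoch := int64(-1)         // what a malformed notification could decode to
	fmt.Println(uint64(epoch)) // prints 18446744073709551615: wraps instead of erroring
}
```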
diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go
index 6ff692327..bc267ecb6 100644
--- a/pkg/morph/event/netmap/epoch_test.go
+++ b/pkg/morph/event/netmap/epoch_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -16,7 +17,7 @@ func TestParseNewEpoch(t *testing.T) {
}
_, err := ParseNewEpoch(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
})
t.Run("wrong first parameter type", func(t *testing.T) {
diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go
index 993182ab4..0260810b8 100644
--- a/pkg/morph/event/netmap/update_peer_notary.go
+++ b/pkg/morph/event/netmap/update_peer_notary.go
@@ -10,7 +10,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)
-var errNilPubKey = errors.New("public key is nil")
+var errNilPubKey = errors.New("could not parse public key: public key is nil")
func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
if v == nil {
@@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256())
if err != nil {
- return fmt.Errorf("parse public key: %w", err)
+ return fmt.Errorf("could not parse public key: %w", err)
}
return
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index b11973646..37091f768 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
for {
opCode, param, err = ctx.Next()
if err != nil {
- return nil, fmt.Errorf("get next opcode in script: %w", err)
+ return nil, fmt.Errorf("could not get next opcode in script: %w", err)
}
if opCode == opcode.RET {
@@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
// retrieve contract's script hash
contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param)
if err != nil {
- return nil, fmt.Errorf("decode contract hash: %w", err)
+ return nil, fmt.Errorf("could not decode contract hash: %w", err)
}
// retrieve contract's method
@@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
if len(args) != 0 {
err = p.validateParameterOpcodes(args)
if err != nil {
- return nil, fmt.Errorf("validate arguments: %w", err)
+ return nil, fmt.Errorf("could not validate arguments: %w", err)
}
// without args packing opcodes
@@ -199,14 +199,14 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
// neo-go API)
//
// this check prevents notary flow recursion
- if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 &&
- !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version
+ if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 ||
+ bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version
return ErrTXAlreadyHandled
}
currentAlphabet, err := p.alphaKeys()
if err != nil {
- return fmt.Errorf("fetch Alphabet public keys: %w", err)
+ return fmt.Errorf("could not fetch Alphabet public keys: %w", err)
}
err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
@@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error {
argsLen, err := IntFromOpcode(ops[l-2])
if err != nil {
- return fmt.Errorf("parse argument len: %w", err)
+ return fmt.Errorf("could not parse argument len: %w", err)
}
err = validateNestedArgs(argsLen, ops[:l-2])
@@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error {
argsLen, err := IntFromOpcode(ops[i-1])
if err != nil {
- return fmt.Errorf("parse argument len: %w", err)
+ return fmt.Errorf("could not parse argument len: %w", err)
}
expArgLen += argsLen + 1
@@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error {
currBlock, err := p.blockCounter.BlockCount()
if err != nil {
- return fmt.Errorf("fetch current chain height: %w", err)
+ return fmt.Errorf("could not fetch current chain height: %w", err)
}
if currBlock >= nvb.Height {
@@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("get Alphabet verification script: %w", err)
+ return fmt.Errorf("could not get Alphabet verification script: %w", err)
}
if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) {
@@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("get Alphabet verification script: %w", err)
+ return fmt.Errorf("could not get Alphabet verification script: %w", err)
}
// the second one must be witness of the current
@@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
// the last one must be a placeholder for notary contract witness
last := len(w) - 1
- if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981
- !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
+ if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981
+ bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
len(w[last].VerificationScript) != 0 {
return errIncorrectNotaryPlaceholder
}
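Both rewritten checks above are pure De Morgan transformations: `!A && !B` becomes `!(A || B)`, with `A = len(...) == 0` and `B = bytes.Equal(...)`, so the accepted inputs are unchanged. A throwaway check of the equivalence:

```go
package main

import "fmt"

func main() {
	// old form: !a && !b; new form: !(a || b). Same truth table by De Morgan.
	for _, a := range []bool{false, true} {
		for _, b := range []bool{false, true} {
			fmt.Println((!a && !b) == !(a || b)) // always true
		}
	}
}
```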
diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go
index 5adeb4b30..90eff0bd2 100644
--- a/pkg/morph/event/parsers.go
+++ b/pkg/morph/event/parsers.go
@@ -11,6 +11,15 @@ import (
// from the StackItem list.
type NotificationParser func(*state.ContainedNotificationEvent) (Event, error)
+// NotificationParserInfo is a structure that groups
+// the parameters of particular contract
+// notification event parser.
+type NotificationParserInfo struct {
+ scriptHashWithType
+
+ p NotificationParser
+}
+
// NotaryPreparator constructs NotaryEvent
// from the NotaryRequest event.
type NotaryPreparator interface {
@@ -38,6 +47,24 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) {
n.p = p
}
+// SetParser is an event parser setter.
+func (s *NotificationParserInfo) SetParser(v NotificationParser) {
+ s.p = v
+}
+
+func (s NotificationParserInfo) parser() NotificationParser {
+ return s.p
+}
+
+// SetType is an event type setter.
+func (s *NotificationParserInfo) SetType(v Type) {
+ s.typ = v
+}
+
+func (s NotificationParserInfo) getType() Type {
+ return s.typ
+}
+
type wrongPrmNumber struct {
exp, act int
}
diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go
index b384e436b..28c968046 100644
--- a/pkg/morph/event/rolemanagement/designate.go
+++ b/pkg/morph/event/rolemanagement/designate.go
@@ -26,7 +26,7 @@ func (Designate) MorphEvent() {}
func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) {
params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
if len(params) != 2 {
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index 0088be400..f3b6443fb 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -1,7 +1,6 @@
package event
import (
- "context"
"errors"
"fmt"
@@ -20,9 +19,13 @@ type scriptHashValue struct {
hash util.Uint160
}
+type typeValue struct {
+ typ Type
+}
+
type scriptHashWithType struct {
- Hash util.Uint160
- Type Type
+ scriptHashValue
+ typeValue
}
type notaryRequestTypes struct {
@@ -69,15 +72,25 @@ func (s scriptHashValue) ScriptHash() util.Uint160 {
return s.hash
}
+// SetType is an event type setter.
+func (s *typeValue) SetType(v Type) {
+ s.typ = v
+}
+
+// GetType is an event type getter.
+func (s typeValue) GetType() Type {
+ return s.typ
+}
+
// WorkerPoolHandler sets closure over worker pool w with passed handler h.
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler {
- return func(ctx context.Context, e Event) {
+ return func(e Event) {
err := w.Submit(func() {
- h(ctx, e)
+ h(e)
})
if err != nil {
- log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool,
+ zap.String("error", err.Error()),
)
}
}
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index 4ef59ed6a..ee5466a7d 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -245,16 +245,16 @@ routeloop:
}
func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool {
- s.log.Info(ctx, logs.RPConnectionLost)
+ s.log.Info(logs.RPConnectionLost)
if !s.client.SwitchRPC(ctx) {
- s.log.Error(ctx, logs.RPCNodeSwitchFailure)
+ s.log.Error(logs.RPCNodeSwitchFailure)
return false
}
s.Lock()
chs := newSubChannels()
go func() {
- finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
+ finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
}()
s.current = chs
s.Unlock()
@@ -295,7 +295,7 @@ drainloop:
// restoreSubscriptions restores subscriptions according to
// cached information about them.
-func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent,
+func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent,
blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent,
) bool {
var err error
@@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *s
if s.subscribedToNewBlocks {
_, err = s.client.ReceiveBlocks(blCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *s
for contract := range s.subscribedEvents {
_, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *s
for signer := range s.subscribedNotaryEvents {
_, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go
index 974be1120..be20d3571 100644
--- a/pkg/morph/timer/block.go
+++ b/pkg/morph/timer/block.go
@@ -15,19 +15,41 @@ type BlockTickHandler func()
// It can tick the blocks and perform certain actions
// on block time intervals.
type BlockTimer struct {
+ rolledBack bool
+
mtx sync.Mutex
dur BlockMeter
baseDur uint32
+ mul, div uint32
+
cur, tgt uint32
last uint32
h BlockTickHandler
+ ps []BlockTimer
+
once bool
+
+ deltaCfg
+}
+
+// DeltaOption is an option of delta-interval handler.
+type DeltaOption func(*deltaCfg)
+
+type deltaCfg struct {
+ pulse bool
+}
+
+// WithPulse returns an option to call the delta-interval handler multiple times within one base interval.
+func WithPulse() DeltaOption {
+ return func(c *deltaCfg) {
+ c.pulse = true
+ }
}
// StaticBlockMeter returns BlockMeters that always returns (d, nil).
@@ -43,19 +65,52 @@ func StaticBlockMeter(d uint32) BlockMeter {
func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
+ mul: 1,
+ div: 1,
h: h,
+ deltaCfg: deltaCfg{
+ pulse: true,
+ },
}
}
// NewOneTickTimer creates a new BlockTimer that ticks only once.
+//
+// Do not use delta handlers with pulse in this timer.
func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
+ mul: 1,
+ div: 1,
h: h,
once: true,
}
}
+// OnDelta registers a handler which is executed on the (mul / div * BlockMeter())-th block
+// after each basic interval reset.
+//
+// If the WithPulse option is provided, the handler is executed every
+// (mul / div * BlockMeter()) blocks during the base interval.
+func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) {
+ c := deltaCfg{
+ pulse: false,
+ }
+
+ for i := range opts {
+ opts[i](&c)
+ }
+
+ t.ps = append(t.ps, BlockTimer{
+ mul: mul,
+ div: div,
+ h: h,
+ once: t.once,
+
+ deltaCfg: c,
+ })
+}
+
// Reset resets previous ticks of the BlockTimer.
//
// Returns BlockMeter's error upon occurrence.
@@ -69,18 +124,29 @@ func (t *BlockTimer) Reset() error {
t.resetWithBaseInterval(d)
+ for i := range t.ps {
+ t.ps[i].resetWithBaseInterval(d)
+ }
+
t.mtx.Unlock()
return nil
}
func (t *BlockTimer) resetWithBaseInterval(d uint32) {
+ t.rolledBack = false
t.baseDur = d
t.reset()
}
func (t *BlockTimer) reset() {
- delta := t.baseDur
+ mul, div := t.mul, t.div
+
+ if !t.pulse && t.rolledBack && mul < div {
+ mul, div = 1, 1
+ }
+
+ delta := mul * t.baseDur / div
if delta == 0 {
delta = 1
}
@@ -114,7 +180,12 @@ func (t *BlockTimer) tick(h uint32) {
if !t.once {
t.cur = 0
+ t.rolledBack = true
t.reset()
}
}
+
+ for i := range t.ps {
+ t.ps[i].tick(h)
+ }
}
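A delta handler's firing interval follows directly from `reset()` above: `delta = mul * baseDur / div`, clamped to at least one block. A small helper makes the arithmetic concrete:

```go
package main

import "fmt"

// deltaInterval reproduces the computation in BlockTimer.reset().
func deltaInterval(mul, div, baseDur uint32) uint32 {
	d := mul * baseDur / div
	if d == 0 {
		d = 1
	}
	return d
}

func main() {
	const baseDur = 10
	fmt.Println(deltaInterval(1, 2, baseDur)) // 5: OnDelta(1, 2, ...) fires twice per base interval
	fmt.Println(deltaInterval(2, 1, baseDur)) // 20: OnDelta(2, 1, ...) fires every other base interval
}
```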
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index a144b3db6..ee6091845 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -1,7 +1,6 @@
package timer_test
import (
- "errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
@@ -18,7 +17,7 @@ func tickN(t *timer.BlockTimer, n uint32) {
// "resetting" consists of ticking the current height as well and invoking `Reset`.
func TestIRBlockTimer_Reset(t *testing.T) {
var baseCounter [2]int
- const blockDur = uint32(3)
+ blockDur := uint32(3)
bt1 := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
@@ -49,40 +48,8 @@ func TestIRBlockTimer_Reset(t *testing.T) {
require.Equal(t, baseCounter[0], baseCounter[1])
}
-func TestBlockTimer_ResetChangeDuration(t *testing.T) {
- var dur uint32 = 2
- var err error
- var counter int
-
- bt := timer.NewBlockTimer(
- func() (uint32, error) { return dur, err },
- func() { counter++ })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 2)
- require.Equal(t, 1, counter)
-
- t.Run("return error", func(t *testing.T) {
- dur = 5
- err = errors.New("my awesome error")
- require.ErrorIs(t, bt.Reset(), err)
-
- tickN(bt, 2)
- require.Equal(t, 2, counter)
- })
- t.Run("change duration", func(t *testing.T) {
- dur = 5
- err = nil
- require.NoError(t, bt.Reset())
-
- tickN(bt, 5)
- require.Equal(t, 3, counter)
- })
-}
-
func TestBlockTimer(t *testing.T) {
- const blockDur = uint32(10)
+ blockDur := uint32(10)
baseCallCounter := uint32(0)
bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
@@ -96,6 +63,85 @@ func TestBlockTimer(t *testing.T) {
tickN(bt, intervalNum*blockDur)
require.Equal(t, intervalNum, uint32(baseCallCounter))
+
+ // add half-interval handler
+ halfCallCounter := uint32(0)
+
+ bt.OnDelta(1, 2, func() {
+ halfCallCounter++
+ })
+
+ // add double interval handler
+ doubleCallCounter := uint32(0)
+
+ bt.OnDelta(2, 1, func() {
+ doubleCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ baseCallCounter = 0
+ intervalNum = 20
+
+ tickN(bt, intervalNum*blockDur)
+
+ require.Equal(t, intervalNum, uint32(halfCallCounter))
+ require.Equal(t, intervalNum, uint32(baseCallCounter))
+ require.Equal(t, intervalNum/2, uint32(doubleCallCounter))
+}
+
+func TestDeltaPulse(t *testing.T) {
+ blockDur := uint32(9)
+ baseCallCounter := uint32(0)
+
+ bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ deltaCallCounter := uint32(0)
+
+ div := uint32(3)
+
+ bt.OnDelta(1, div, func() {
+ deltaCallCounter++
+ }, timer.WithPulse())
+
+ require.NoError(t, bt.Reset())
+
+ intervalNum := uint32(7)
+
+ tickN(bt, intervalNum*blockDur)
+
+ require.Equal(t, intervalNum, uint32(baseCallCounter))
+ require.Equal(t, intervalNum*div, uint32(deltaCallCounter))
+}
+
+func TestDeltaReset(t *testing.T) {
+ blockDur := uint32(6)
+ baseCallCounter := 0
+
+ bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ deltaCallCounter := 0
+
+ bt.OnDelta(1, 3, func() {
+ deltaCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 6)
+
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, deltaCallCounter)
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 3)
+
+ require.Equal(t, 2, deltaCallCounter)
}
func TestNewOneTickTimer(t *testing.T) {
@@ -122,51 +168,82 @@ func TestNewOneTickTimer(t *testing.T) {
tickN(bt, 10)
require.Equal(t, 1, baseCallCounter)
})
+
+ t.Run("delta without pulse", func(t *testing.T) {
+ blockDur = uint32(10)
+ baseCallCounter = 0
+
+ bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ deltaCallCounter := 0
+
+ bt.OnDelta(1, 10, func() {
+ deltaCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 10)
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, deltaCallCounter)
+
+ tickN(bt, 10) // 10 more ticks must not affect counters
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, deltaCallCounter)
+ })
}
func TestBlockTimer_TickSameHeight(t *testing.T) {
- var baseCounter int
+ var baseCounter, deltaCounter int
blockDur := uint32(2)
bt := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
func() { baseCounter++ })
+ bt.OnDelta(2, 1, func() {
+ deltaCounter++
+ })
require.NoError(t, bt.Reset())
- check := func(t *testing.T, h uint32, base int) {
+ check := func(t *testing.T, h uint32, base, delta int) {
for range 2 * int(blockDur) {
bt.Tick(h)
require.Equal(t, base, baseCounter)
+ require.Equal(t, delta, deltaCounter)
}
}
- check(t, 1, 0)
- check(t, 2, 1)
- check(t, 3, 1)
- check(t, 4, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 1, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 2, 1)
t.Run("works the same way after `Reset()`", func(t *testing.T) {
t.Run("same block duration", func(t *testing.T) {
require.NoError(t, bt.Reset())
baseCounter = 0
+ deltaCounter = 0
- check(t, 1, 0)
- check(t, 2, 1)
- check(t, 3, 1)
- check(t, 4, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 1, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 2, 1)
})
t.Run("different block duration", func(t *testing.T) {
blockDur = 3
require.NoError(t, bt.Reset())
baseCounter = 0
+ deltaCounter = 0
- check(t, 1, 0)
- check(t, 2, 0)
- check(t, 3, 1)
- check(t, 4, 1)
- check(t, 5, 1)
- check(t, 6, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 0, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 1, 0)
+ check(t, 5, 1, 0)
+ check(t, 6, 2, 1)
})
})
}
diff --git a/pkg/network/address.go b/pkg/network/address.go
index 4643eef15..cb83a813d 100644
--- a/pkg/network/address.go
+++ b/pkg/network/address.go
@@ -2,11 +2,11 @@ package network
import (
"errors"
+ "fmt"
"net"
"net/url"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
@@ -44,9 +44,11 @@ func (a Address) equal(addr Address) bool {
// See also FromString.
func (a Address) URIAddr() string {
_, host, err := manet.DialArgs(a.ma)
- // the only correct way to construct Address is AddressFromString
- // which makes this error appear unexpected
- assert.NoError(err, "could not get host addr")
+ if err != nil {
+ // the only correct way to construct Address is AddressFromString
+ // which makes this error appear unexpected
+ panic(fmt.Errorf("could not get host addr: %w", err))
+ }
if !a.IsTLSEnabled() {
return host
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 54c1e18fb..481d1ea4a 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -7,12 +7,10 @@ import (
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -64,16 +62,12 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address
grpcOpts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
- qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInterceptor(),
- tagging.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
- qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
- tagging.NewStreamClientInterceptor(),
),
grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
@@ -161,7 +155,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
group.IterateAddresses(func(addr network.Address) bool {
select {
case <-ctx.Done():
- firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled)
+ firstErr = context.Canceled
return true
default:
}
diff --git a/pkg/network/group.go b/pkg/network/group.go
index 0044fb2d4..9843b14d4 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -3,8 +3,6 @@ package network
import (
"errors"
"fmt"
- "iter"
- "slices"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -69,8 +67,9 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // Addresses must return an iterator over network addresses.
- Addresses() iter.Seq[string]
+ // IterateAddresses must iterate over network addresses and pass each one
+ // to the handler until it returns true.
+ IterateAddresses(func(string) bool)
// NumberOfAddresses must return number of addresses in group.
NumberOfAddresses() int
@@ -131,19 +130,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error {
// iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f
// until 1st parsing failure or f's error.
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) {
- for s := range iter.Addresses() {
+ iter.IterateAddresses(func(s string) bool {
var a Address
err = a.FromString(s)
if err != nil {
- return fmt.Errorf("could not parse address from string: %w", err)
+ err = fmt.Errorf("could not parse address from string: %w", err)
+ return true
}
err = f(a)
- if err != nil {
- return err
- }
- }
+
+ return err != nil
+ })
return
}
@@ -165,8 +164,10 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
// at least one common address.
func (x AddressGroup) Intersects(x2 AddressGroup) bool {
for i := range x {
- if slices.ContainsFunc(x2, x[i].equal) {
- return true
+ for j := range x2 {
+ if x[i].equal(x2[j]) {
+ return true
+ }
}
}
diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go
index d08264533..5b335fa52 100644
--- a/pkg/network/group_test.go
+++ b/pkg/network/group_test.go
@@ -1,8 +1,6 @@
package network
import (
- "iter"
- "slices"
"sort"
"testing"
@@ -60,8 +58,10 @@ func TestAddressGroup_FromIterator(t *testing.T) {
type testIterator []string
-func (t testIterator) Addresses() iter.Seq[string] {
- return slices.Values(t)
+func (t testIterator) IterateAddresses(f func(string) bool) {
+ for i := range t {
+ f(t[i])
+ }
}
func (t testIterator) NumberOfAddresses() int {
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index 8cbf8d9c3..49d083a90 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -80,26 +80,3 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
-
-type containerStreamerV2 struct {
- containerGRPC.ContainerService_ListStreamServer
-}
-
-func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error {
- return s.ContainerService_ListStreamServer.Send(
- resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse),
- )
-}
-
-// ListStream converts gRPC ListRequest message and server-side stream and overtakes its data
-// to gRPC stream.
-func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error {
- listReq := new(container.ListStreamRequest)
- if err := listReq.FromGRPCMessage(req); err != nil {
- return err
- }
-
- return s.srv.ListStream(listReq, &containerStreamerV2{
- ContainerService_ListStreamServer: gStream,
- })
-}
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index 15dacd553..fa6252118 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server {
// Patch opens internal Object patch stream and feeds it by the data read from gRPC stream.
func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
- stream, err := s.srv.Patch(gStream.Context())
+ stream, err := s.srv.Patch()
if err != nil {
return err
}
@@ -68,7 +68,7 @@ func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
- stream, err := s.srv.Put(gStream.Context())
+ stream, err := s.srv.Put()
if err != nil {
return err
}
diff --git a/pkg/network/validation.go b/pkg/network/validation.go
index b5157f28f..92f650119 100644
--- a/pkg/network/validation.go
+++ b/pkg/network/validation.go
@@ -2,7 +2,6 @@ package network
import (
"errors"
- "iter"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -35,8 +34,8 @@ var (
// MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo
-func (x NodeEndpointsIterator) Addresses() iter.Seq[string] {
- return (netmap.NodeInfo)(x).NetworkEndpoints()
+func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) {
+ (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
}
func (x NodeEndpointsIterator) NumberOfAddresses() int {
diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go
index 6c2df8428..b77d3e3e6 100644
--- a/pkg/services/accounting/morph/executor.go
+++ b/pkg/services/accounting/morph/executor.go
@@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
+func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing account")
@@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceReq
return nil, fmt.Errorf("invalid account: %w", err)
}
- amount, err := s.client.BalanceOf(ctx, id)
+ amount, err := s.client.BalanceOf(id)
if err != nil {
return nil, err
}
- balancePrecision, err := s.client.Decimals(ctx)
+ balancePrecision, err := s.client.Decimals()
if err != nil {
return nil, err
}
diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go
index 61fb025b8..b9bea07fb 100644
--- a/pkg/services/apemanager/audit.go
+++ b/pkg/services/apemanager/audit.go
@@ -33,7 +33,7 @@ func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainReq
return res, err
}
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
+ audit.LogRequest(a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
res.GetBody().GetChainID()),
@@ -49,7 +49,7 @@ func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChain
return res, err
}
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
+ audit.LogRequest(a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
nil),
@@ -65,7 +65,7 @@ func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveCh
return res, err
}
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
+ audit.LogRequest(a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
req.GetBody().GetChainID()),
diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go
index 1d485321c..e64f9a8d1 100644
--- a/pkg/services/apemanager/errors/errors.go
+++ b/pkg/services/apemanager/errors/errors.go
@@ -9,9 +9,3 @@ func ErrAPEManagerAccessDenied(reason string) error {
err.WriteReason(reason)
return err
}
-
-func ErrAPEManagerInvalidArgument(msg string) error {
- err := new(apistatus.InvalidArgument)
- err.SetMessage(msg)
- return err
-}
diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go
index fc08fe569..86f9cb893 100644
--- a/pkg/services/apemanager/executor.go
+++ b/pkg/services/apemanager/executor.go
@@ -22,7 +22,6 @@ import (
policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/mr-tron/base58/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
@@ -35,8 +34,6 @@ type cfg struct {
type Service struct {
cfg
- waiter Waiter
-
cnrSrc containercore.Source
contractStorage ape_contract.ProxyAdaptedContractStorage
@@ -44,17 +41,11 @@ type Service struct {
type Option func(*cfg)
-type Waiter interface {
- WaitTxHalt(context.Context, uint32, util.Uint256) error
-}
-
-func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service {
+func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service {
s := &Service{
cnrSrc: cnrSrc,
contractStorage: contractStorage,
-
- waiter: waiter,
}
for i := range opts {
@@ -62,7 +53,7 @@ func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedC
}
if s.log == nil {
- s.log = logger.NewLoggerWrapper(zap.NewNop())
+ s.log = &logger.Logger{Logger: zap.NewNop()}
}
return s
@@ -78,12 +69,12 @@ var _ Server = (*Service)(nil)
// validateContainerTargetRequest validates a request for the container target.
// It checks whether the request actor is the owner of the container; otherwise it denies the request.
-func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error {
+func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error {
var cidSDK cidSDK.ID
if err := cidSDK.DecodeString(cid); err != nil {
- return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err))
+ return fmt.Errorf("invalid CID format: %w", err)
}
- isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey)
+ isOwner, err := s.isActorContainerOwner(cidSDK, pubKey)
if err != nil {
return fmt.Errorf("failed to check owner: %w", err)
}
@@ -93,7 +84,7 @@ func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string
return nil
}
-func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
+func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -101,7 +92,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques
chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw())
if err != nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error())
+ return nil, err
}
if len(chain.ID) == 0 {
const randomIDLength = 10
@@ -117,19 +108,15 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ return nil, fmt.Errorf("unsupported target type: %s", targetType)
}
- txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain)
- if err != nil {
- return nil, err
- }
- if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
+ if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil {
return nil, err
}
@@ -142,7 +129,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques
return resp, nil
}
-func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
+func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -153,19 +140,15 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ return nil, fmt.Errorf("unsupported target type: %s", targetType)
}
- txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID())
- if err != nil {
- return nil, err
- }
- if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
+ if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil {
return nil, err
}
@@ -177,7 +160,7 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain
return resp, nil
}
-func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
+func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -188,12 +171,12 @@ func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRe
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ return nil, fmt.Errorf("unsupported target type: %s", targetType)
}
chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target)
@@ -227,23 +210,23 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK
}
sig := vh.GetBodySignature()
if sig == nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error())
+ return nil, errEmptyBodySignature
}
key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
if err != nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err))
+ return nil, fmt.Errorf("invalid signature key: %w", err)
}
return key, nil
}
-func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
+func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- cnr, err := s.cnrSrc.Get(ctx, cid)
+ cnr, err := s.cnrSrc.Get(cid)
if err != nil {
return false, fmt.Errorf("get container error: %w", err)
}
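
This hunk removes the `Waiter` and its `WaitTxHalt` call, so the transaction hash and vub returned by `AddMorphRuleChain`/`RemoveMorphRuleChain` are now discarded. A sketch contrasting the two shapes, under hypothetical `ruleStorage` and `txWaiter` interfaces (the real code uses `util.Uint256` and the proxy-adapted contract storage):

```go
package sketch

import "context"

// txHash and the interfaces below are hypothetical stand-ins for
// util.Uint256, the contract storage, and the deleted Waiter.
type txHash [32]byte

type ruleStorage interface {
	AddRuleChain(target, chain string) (txHash, uint32, error)
}

type txWaiter interface {
	WaitTxHalt(ctx context.Context, vub uint32, h txHash) error
}

// addAndWait is the pattern this revert removes: submit the sidechain
// transaction, then block until it halts (or the vub expires).
func addAndWait(ctx context.Context, s ruleStorage, w txWaiter, target, chain string) error {
	h, vub, err := s.AddRuleChain(target, chain)
	if err != nil {
		return err
	}
	return w.WaitTxHalt(ctx, vub, h)
}

// addFireAndForget is the reverted behavior: hash and vub are discarded and
// the call returns as soon as the transaction is submitted.
func addFireAndForget(s ruleStorage, target, chain string) error {
	_, _, err := s.AddRuleChain(target, chain)
	return err
}
```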
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
index eb6263320..278f6da31 100644
--- a/pkg/services/common/ape/checker.go
+++ b/pkg/services/common/ape/checker.go
@@ -1,7 +1,6 @@
package ape
import (
- "context"
"crypto/ecdsa"
"errors"
"fmt"
@@ -12,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
@@ -20,6 +20,7 @@ import (
)
var (
+ errInvalidTargetType = errors.New("bearer token defines non-container target override")
errBearerExpired = errors.New("bearer token has expired")
errBearerInvalidSignature = errors.New("bearer token has invalid signature")
errBearerInvalidContainerID = errors.New("bearer token was created for another container")
@@ -43,12 +44,15 @@ type CheckPrm struct {
// The request's bearer token. It is used to check APE overrides carried by the token.
BearerToken *bearer.Token
+
+ // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
+ SoftAPECheck bool
}
// CheckCore provides methods to perform the common logic of APE check.
type CheckCore interface {
// CheckAPE performs the common policy-engine check logic on a prepared request.
- CheckAPE(ctx context.Context, prm CheckPrm) error
+ CheckAPE(prm CheckPrm) error
}
type checkerCoreImpl struct {
@@ -70,30 +74,22 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora
}
// CheckAPE performs the common policy-engine check logic on a prepared request.
-func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
+func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
var cr policyengine.ChainRouter
- if prm.BearerToken != nil {
+ if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
var err error
if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
return fmt.Errorf("bearer validation error: %w", err)
}
- if prm.BearerToken.Impersonate() {
- cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
- } else {
- override, isSet := prm.BearerToken.APEOverride()
- if !isSet {
- return errors.New("expected for override within bearer")
- }
- cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override)
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
- }
+ cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride())
+ if err != nil {
+ return fmt.Errorf("create chain router error: %w", err)
}
} else {
cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
}
- groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey)
+ groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -108,10 +104,17 @@ func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
if err != nil {
return err
}
- if found && status == apechain.Allow {
+ if !found && prm.SoftAPECheck || status == apechain.Allow {
return nil
}
- return newChainRouterError(prm.Request.Operation(), status)
+ err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String())
+ return apeErr(err)
+}
+
+func apeErr(err error) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(err.Error())
+ return errAccessDenied
}
// isValidBearer checks whether bearer token was correctly signed by authorized
@@ -133,19 +136,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
}
// Check for ape overrides defined in the bearer token.
- if apeOverride, isSet := token.APEOverride(); isSet {
- switch apeOverride.Target.TargetType {
- case ape.TargetTypeContainer:
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !cntID.Equals(targetCnr) {
- return errBearerInvalidContainerID
- }
- default:
- }
+ apeOverride := token.APEOverride()
+ if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
+ return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
+ }
+
+ // Then check that the override's container matches the container in the request.
+ var targetCnr cid.ID
+ err := targetCnr.DecodeString(apeOverride.Target.Name)
+ if err != nil {
+ return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
+ }
+ if !cntID.Equals(targetCnr) {
+ return errBearerInvalidContainerID
}
// Then check if container owner signed this token.
@@ -157,16 +160,8 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
var usrSender user.ID
user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
- // Then check if sender is valid. If it is an impersonated token, the sender is set to the token's issuer's
- // public key, but not the actual sender.
- if !token.Impersonate() {
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
- }
- } else {
- if !bearer.ResolveIssuer(*token).Equals(usrSender) {
- return errBearerInvalidOwner
- }
+ if !token.AssertUser(usrSender) {
+ return errBearerInvalidOwner
}
return nil
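
The new `SoftAPECheck` branch relies on Go operator precedence: `&&` binds tighter than `||`, so the condition in `CheckAPE` groups as `(!found && prm.SoftAPECheck) || (status == apechain.Allow)`. A small stand-alone illustration of the resulting decision table:

```go
package sketch

import "fmt"

// softCheckAllows reproduces the decision from CheckAPE: Go's && binds
// tighter than ||, so this groups as (!found && soft) || statusAllow.
func softCheckAllows(found, soft, statusAllow bool) bool {
	return !found && soft || statusAllow
}

func demoSoftCheck() {
	fmt.Println(softCheckAllows(false, true, false))  // true: no rule found, soft mode permits
	fmt.Println(softCheckAllows(true, true, false))   // false: a rule matched and it does not allow
	fmt.Println(softCheckAllows(false, false, false)) // false: no rule, strict mode denies
	fmt.Println(softCheckAllows(true, false, true))   // true: explicit Allow always passes
}
```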
diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go
deleted file mode 100644
index d3c381de7..000000000
--- a/pkg/services/common/ape/error.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package ape
-
-import (
- "fmt"
-
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-)
-
-// ChainRouterError is returned when chain router validation prevents
-// the APE request from being processed (no rule found, access denied, etc.).
-type ChainRouterError struct {
- operation string
- status apechain.Status
-}
-
-func (e *ChainRouterError) Error() string {
- return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status())
-}
-
-func (e *ChainRouterError) Operation() string {
- return e.operation
-}
-
-func (e *ChainRouterError) Status() apechain.Status {
- return e.status
-}
-
-func newChainRouterError(operation string, status apechain.Status) *ChainRouterError {
- return &ChainRouterError{
- operation: operation,
- status: status,
- }
-}
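
With the typed `ChainRouterError` deleted, callers lose access to `Operation()` and `Status()`; the denial is reported only through an API status error carrying a reason string. A sketch of the replacement pattern, using a hypothetical `accessDenied` type in place of `apistatus.ObjectAccessDenied`:

```go
package sketch

import (
	"errors"
	"fmt"
)

// accessDenied is a hypothetical stand-in for apistatus.ObjectAccessDenied:
// a status error carrying a human-readable reason instead of typed fields.
type accessDenied struct{ reason string }

func (e *accessDenied) Error() string { return "access denied: " + e.reason }

// deny folds operation and status into the reason string, the way the new
// apeErr helper in checker.go does.
func deny(operation, status string) error {
	return &accessDenied{reason: fmt.Sprintf(
		"access to operation %s is denied by access policy engine: %s", operation, status)}
}

// isAccessDenied is all a consumer can still do: detect the denial.
func isAccessDenied(err error) bool {
	var ad *accessDenied
	return errors.As(err, &ad)
}
```

The trade-off: consumers can still detect the denial with `errors.As`, but can no longer branch on the structured operation or chain status.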
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 3b5dab9aa..2cdb30b45 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -49,11 +49,11 @@ var (
)
type ir interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
+ InnerRingKeys() ([][]byte, error)
}
type containers interface {
- Get(context.Context, cid.ID) (*containercore.Container, error)
+ Get(cid.ID) (*containercore.Container, error)
}
type apeChecker struct {
@@ -106,7 +106,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -116,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
if err != nil {
return nil, err
}
@@ -126,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
}
}
- namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
+ namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("could not get owner namespace: %w", err)
}
- if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
+ if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil {
return nil, err
}
@@ -143,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
reqProps,
)
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -175,84 +175,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
return nil, apeErr(nativeschema.MethodListContainers, s)
}
-func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream")
- defer span.End()
-
- role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
- if err != nil {
- return err
- }
-
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
- nativeschema.PropertyKeyActorRole: role,
- }
-
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
- if err != nil {
- return err
- }
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
-
- namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
- if err != nil {
- return fmt.Errorf("could not get owner namespace: %w", err)
- }
- if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
- return err
- }
-
- request := aperequest.NewRequest(
- nativeschema.MethodListContainers,
- aperequest.NewResource(
- resourceName(namespace, ""),
- make(map[string]string),
- ),
- reqProps,
- )
-
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- rt := policyengine.NewRequestTargetWithNamespace(namespace)
- rt.User = &policyengine.Target{
- Type: policyengine.User,
- Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
- }
- rt.Groups = make([]policyengine.Target, len(groups))
- for i := range groups {
- rt.Groups[i] = policyengine.GroupTarget(groups[i])
- }
-
- s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
- if err != nil {
- return err
- }
-
- if found && s == apechain.Allow {
- return ac.next.ListStream(req, stream)
- }
-
- return apeErr(nativeschema.MethodListContainers, s)
-}
-
func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -262,7 +189,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
if err != nil {
return nil, err
}
@@ -272,7 +199,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
}
}
- namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID())
+ namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("get namespace error: %w", err)
}
@@ -280,21 +207,16 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, err
}
- cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer())
- if err != nil {
- return nil, fmt.Errorf("get container properties: %w", err)
- }
-
request := aperequest.NewRequest(
nativeschema.MethodPutContainer,
aperequest.NewResource(
resourceName(namespace, ""),
- cnrProps,
+ make(map[string]string),
),
reqProps,
)
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -326,7 +248,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, apeErr(nativeschema.MethodPutContainer, s)
}
-func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
+func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
if vh == nil {
return "", nil, errMissingVerificationHeader
}
@@ -349,7 +271,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.O
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(ctx, pkBytes)
+ isIR, err := ac.isInnerRingKey(pkBytes)
if err != nil {
return "", nil, err
}
@@ -370,7 +292,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
return err
}
- cont, err := ac.reader.Get(ctx, id)
+ cont, err := ac.reader.Get(id)
if err != nil {
return err
}
@@ -386,7 +308,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
namespace = cntNamespace
}
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -400,7 +322,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
op,
aperequest.NewResource(
resourceName(namespace, id.EncodeToString()),
- getContainerProps(cont),
+ ac.getContainerProps(cont),
),
reqProps,
)
@@ -450,26 +372,10 @@ func resourceName(namespace string, container string) string {
return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
}
-func getContainerProps(c *containercore.Container) map[string]string {
- props := map[string]string{
+func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string {
+ return map[string]string{
nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
}
- for attrName, attrVal := range c.Value.Attributes() {
- name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName)
- props[name] = attrVal
- }
- return props
-}
-
-func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) {
- if cnrV2 == nil {
- return nil, errors.New("container is not set")
- }
- c := cnrSDK.Container{}
- if err := c.ReadFromV2(*cnrV2); err != nil {
- return nil, err
- }
- return getContainerProps(&containercore.Container{Value: c}), nil
}
func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
@@ -479,7 +385,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
if err != nil {
return nil, nil, err
}
- role, err := ac.getRole(ctx, actor, pk, cont, cnrID)
+ role, err := ac.getRole(actor, pk, cont, cnrID)
if err != nil {
return nil, nil, err
}
@@ -487,7 +393,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
if err != nil {
return nil, nil, err
}
@@ -499,13 +405,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
return reqProps, pk, nil
}
-func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
+func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
if cont.Value.Owner().Equals(*actor) {
return nativeschema.PropertyValueContainerRoleOwner, nil
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(ctx, pkBytes)
+ isIR, err := ac.isInnerRingKey(pkBytes)
if err != nil {
return "", err
}
@@ -513,7 +419,7 @@ func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.Publ
return nativeschema.PropertyValueContainerRoleIR, nil
}
- isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont)
+ isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont)
if err != nil {
return "", err
}
@@ -607,8 +513,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) {
- innerRingKeys, err := ac.ir.InnerRingKeys(ctx)
+func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) {
+ innerRingKeys, err := ac.ir.InnerRingKeys()
if err != nil {
return false, err
}
@@ -622,11 +528,11 @@ func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, erro
return false, nil
}
-func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
+func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
binCnrID := make([]byte, sha256.Size)
cnrID.Encode(binCnrID)
- nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm)
+ nm, err := netmap.GetLatestNetworkMap(ac.nm)
if err != nil {
return false, err
}
@@ -637,7 +543,7 @@ func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.I
// then check the previous netmap; this can happen in between epoch changes
// when a node migrates data from the last epoch's container
- nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm)
+ nm, err = netmap.GetPreviousNetworkMap(ac.nm)
if err != nil {
return false, err
}
@@ -662,7 +568,7 @@ func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containerc
return false
}
-func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -670,19 +576,24 @@ func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID)
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr := ownerSDK.ScriptHash()
+ addr, err := ownerSDK.ScriptHash()
+ if err != nil {
+ return "", err
+ }
namespace := ""
- subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
+ subject, err := ac.frostFSIDClient.GetSubject(addr)
if err == nil {
namespace = subject.Namespace
- } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
- return "", fmt.Errorf("get subject error: %w", err)
+ } else {
+ if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
+ return "", fmt.Errorf("get subject error: %w", err)
+ }
}
return namespace, nil
}
-func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -690,8 +601,11 @@ func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.Own
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr := ownerSDK.ScriptHash()
- subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
+ addr, err := ownerSDK.ScriptHash()
+ if err != nil {
+ return "", err
+ }
+ subject, err := ac.frostFSIDClient.GetSubject(addr)
if err != nil {
return "", fmt.Errorf("get subject error: %w", err)
}
@@ -725,12 +639,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro
// validateNamespaceByPublicKey validates whether the namespace of a request actor equals the owner's namespace.
// An actor's namespace is derived from a public key.
-func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error {
+func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID)
+ actorNamespace, err := ac.namespaceByOwner(actorOwnerID)
if err != nil {
return fmt.Errorf("could not get actor namespace: %w", err)
}
@@ -741,11 +655,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys
}
// fillWithUserClaimTags fills APE request properties with user claim tags, fetching them from the frostfsid contract by the actor's public key.
-func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
+func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
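
Several call sites above revert to the two-valued `ScriptHash() (util.Uint160, error)` signature, and `namespaceByOwner` treats a missing subject as the empty namespace. A compressed sketch of that flow, with a hypothetical `owner` interface and a sentinel error standing in for the `SubjectNotFoundErrorMessage` substring check used by the real code:

```go
package sketch

import (
	"errors"
	"fmt"
)

// owner stands in for user.ID in the older SDK, where ScriptHash also
// returns an error.
type owner interface {
	ScriptHash() ([20]byte, error)
}

// errSubjectNotFound is a sentinel used here instead of the substring match
// on SubjectNotFoundErrorMessage performed by the real code.
var errSubjectNotFound = errors.New("subject not found")

// namespaceOf sketches namespaceByOwner: derive the script hash, look up the
// subject, and treat an unknown subject as the empty (root) namespace.
func namespaceOf(o owner, getSubject func([20]byte) (string, error)) (string, error) {
	addr, err := o.ScriptHash()
	if err != nil {
		return "", err
	}
	ns, err := getSubject(addr)
	if err != nil {
		if errors.Is(err, errSubjectNotFound) {
			return "", nil
		}
		return "", fmt.Errorf("get subject error: %w", err)
	}
	return ns, nil
}
```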
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index 6438c34ca..b6b42a559 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -54,8 +54,6 @@ func TestAPE(t *testing.T) {
t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
- t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr)
- t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr)
}
const (
@@ -566,185 +564,6 @@ func testDenyGetContainerByIP(t *testing.T) {
require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
}
-func testDenyGetContainerSysZoneAttr(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindResource,
- Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
- Value: "eggplant",
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
- require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
-}
-
-func testDenyPutContainerSysZoneAttr(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
- owner := testContainer.Owner()
- ownerAddr := owner.ScriptHash()
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- ownerAddr: {},
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- ownerAddr: {},
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodPutContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- nativeschema.ResourceFormatRootContainers,
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindResource,
- Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
- Value: "eggplant",
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initPutRequest(t, testContainer)
-
- resp, err := apeSrv.Put(ctxWithPeerInfo(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
- require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
-}
-
func testDenyGetContainerByGroupID(t *testing.T) {
t.Parallel()
srv := &srvStub{
@@ -859,7 +678,8 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
testContainer := containertest.Container()
owner := testContainer.Owner()
- ownerAddr := owner.ScriptHash()
+ ownerAddr, err := owner.ScriptHash()
+ require.NoError(t, err)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
ownerAddr: {},
@@ -870,7 +690,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
nm.currentEpoch = 100
nm.netmaps = map[uint64]*netmap.NetMap{}
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
Rules: []chain.Rule{
{
Status: chain.AccessDenied,
@@ -953,7 +773,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -1037,7 +857,7 @@ func testDenyPutContainerInvalidNamespace(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -1259,11 +1079,6 @@ func (s *srvStub) List(context.Context, *container.ListRequest) (*container.List
return &container.ListResponse{}, nil
}
-func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error {
- s.calls["ListStream"]++
- return nil
-}
-
func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) {
s.calls["Put"]++
return &container.PutResponse{}, nil
@@ -1273,7 +1088,7 @@ type irStub struct {
keys [][]byte
}
-func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
+func (s *irStub) InnerRingKeys() ([][]byte, error) {
return s.keys, nil
}
@@ -1281,7 +1096,7 @@ type containerStub struct {
c map[cid.ID]*containercore.Container
}
-func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) {
+func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) {
if v, ok := s.c[id]; ok {
return v, nil
}
@@ -1293,21 +1108,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+ return s.GetNetMapByEpoch(s.currentEpoch - diff)
}
-func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
+func (s *netmapStub) Epoch() (uint64, error) {
return s.currentEpoch, nil
}
@@ -1316,7 +1131,7 @@ type frostfsidStub struct {
subjectsExt map[util.Uint160]*client.SubjectExtended
}
-func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) {
+func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) {
s, ok := f.subjects[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1324,7 +1139,7 @@ func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*cl
return s, nil
}
-func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsidStub) GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) {
s, ok := f.subjectsExt[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1712,21 +1527,26 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put
return req
}
-func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 {
+func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 {
var ownerSDK *user.ID
owner := testContainer.Owner()
ownerSDK = &owner
- return ownerSDK.ScriptHash()
+ sc, err := ownerSDK.ScriptHash()
+ require.NoError(t, err)
+ return sc
}
func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) {
var actorUserID user.ID
user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey()))
- actorScriptHash = actorUserID.ScriptHash()
+ var err error
+ actorScriptHash, err = actorUserID.ScriptHash()
+ require.NoError(t, err)
var ownerUserID user.ID
user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey()))
- ownerScriptHash = ownerUserID.ScriptHash()
+ ownerScriptHash, err = ownerUserID.ScriptHash()
+ require.NoError(t, err)
require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String())
return
}
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index b235efa3c..03d3dc13d 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -35,7 +35,7 @@ func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest)
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
@@ -47,7 +47,7 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
@@ -58,29 +58,18 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_List_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
return res, err
}
-// ListStream implements Server.
-func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- err := a.next.ListStream(req, stream)
- if !a.enabled.Load() {
- return err
- }
- audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
- return err
-}
-
// Put implements Server.
func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
res, err := a.next.Put(ctx, req)
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_Put_FullMethodName, req,
audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index cdd0d2514..70234d3de 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -14,7 +14,6 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
- ListStream(context.Context, *container.ListStreamRequest, ListStream) error
}
type executorSvc struct {
@@ -94,11 +93,3 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
s.respSvc.SetMeta(resp)
return resp, nil
}
-
-func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- err := s.exec.ListStream(stream.Context(), req, stream)
- if err != nil {
- return fmt.Errorf("could not execute ListStream request: %w", err)
- }
- return nil
-}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index eaa608eba..adb808af3 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -25,20 +25,20 @@ type morphExecutor struct {
// Reader is an interface of read-only container storage.
type Reader interface {
containercore.Source
+ containercore.EACLSource
// ContainersOf returns a list of container identifiers belonging
// to the specified user of the FrostFS system. Returns the identifiers
// of all FrostFS containers if the pointer to the owner identifier is nil.
- ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
- IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
+ ContainersOf(*user.ID) ([]cid.ID, error)
}
// Writer is an interface of container storage updater.
type Writer interface {
// Put stores specified container in the side chain.
- Put(context.Context, containercore.Container) (*cid.ID, error)
+ Put(containercore.Container) (*cid.ID, error)
// Delete removes specified container from the side chain.
- Delete(context.Context, containercore.RemovalWitness) error
+ Delete(containercore.RemovalWitness) error
}
func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
@@ -48,7 +48,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
+func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
sigV2 := body.GetSignature()
if sigV2 == nil {
// TODO(@cthulhu-rider): #468 use "const" error
@@ -81,7 +81,7 @@ func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *c
}
}
- idCnr, err := s.wrt.Put(ctx, cnr)
+ idCnr, err := s.wrt.Put(cnr)
if err != nil {
return nil, err
}
@@ -95,7 +95,7 @@ func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *c
return res, nil
}
-func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
+func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -125,7 +125,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body
rmWitness.Signature = body.GetSignature()
rmWitness.SessionToken = tok
- err = s.wrt.Delete(ctx, rmWitness)
+ err = s.wrt.Delete(rmWitness)
if err != nil {
return nil, err
}
@@ -133,7 +133,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body
return new(container.DeleteResponseBody), nil
}
-func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
+func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -146,7 +146,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- cnr, err := s.rdr.Get(ctx, id)
+ cnr, err := s.rdr.Get(id)
if err != nil {
return nil, err
}
@@ -173,7 +173,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
return res, nil
}
-func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
+func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errMissingUserID
@@ -186,7 +186,7 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return nil, fmt.Errorf("invalid user ID: %w", err)
}
- cnrs, err := s.rdr.ContainersOf(ctx, &id)
+ cnrs, err := s.rdr.ContainersOf(&id)
if err != nil {
return nil, err
}
@@ -201,56 +201,3 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return res, nil
}
-
-func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error {
- body := req.GetBody()
- idV2 := body.GetOwnerID()
- if idV2 == nil {
- return errMissingUserID
- }
-
- var id user.ID
-
- err := id.ReadFromV2(*idV2)
- if err != nil {
- return fmt.Errorf("invalid user ID: %w", err)
- }
-
- resBody := new(container.ListStreamResponseBody)
- r := new(container.ListStreamResponse)
- r.SetBody(resBody)
-
- var cidList []refs.ContainerID
-
- // Amount of containers to send at once.
- const batchSize = 1000
-
- processCID := func(id cid.ID) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var refID refs.ContainerID
- id.WriteToV2(&refID)
- cidList = append(cidList, refID)
- if len(cidList) == batchSize {
- r.GetBody().SetContainerIDs(cidList)
- cidList = cidList[:0]
- return stream.Send(r)
- }
- return nil
- }
-
- if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil {
- return err
- }
-
- if len(cidList) > 0 {
- r.GetBody().SetContainerIDs(cidList)
- return stream.Send(r)
- }
-
- return nil
-}
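
The deleted `ListStream` executor drained the container iterator in fixed-size batches, flushing every 1000 identifiers and once more for the remainder, while honoring context cancellation. A self-contained sketch of that batching loop over plain strings, with a hypothetical `sender` in place of the gRPC stream:

```go
package sketch

import "context"

// sender stands in for the gRPC list stream; Send ships one batch downstream.
type sender interface {
	Send(ids []string) error
}

// streamInBatches mirrors the removed ListStream loop: honor cancellation,
// flush every batchSize identifiers, and flush the remainder at the end.
func streamInBatches(ctx context.Context, ids []string, out sender) error {
	const batchSize = 1000 // number of containers to send at once, as above

	batch := make([]string, 0, batchSize)
	for _, id := range ids {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		batch = append(batch, id)
		if len(batch) == batchSize {
			if err := out.Send(batch); err != nil {
				return err
			}
			batch = batch[:0]
		}
	}
	if len(batch) > 0 {
		return out.Send(batch)
	}
	return nil
}
```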
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index 1f6fdb0be..87d307385 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -24,11 +24,11 @@ type mock struct {
containerSvcMorph.Reader
}
-func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) {
+func (m mock) Put(_ containerCore.Container) (*cid.ID, error) {
return new(cid.ID), nil
}
-func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error {
+func (m mock) Delete(_ containerCore.RemovalWitness) error {
return nil
}
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index d9208077d..78fd3d34c 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -3,7 +3,6 @@ package container
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
@@ -13,11 +12,4 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
- ListStream(*container.ListStreamRequest, ListStream) error
-}
-
-// ListStream is an interface of FrostFS API v2 compatible search streamer.
-type ListStream interface {
- util.ServerStream
- Send(*container.ListStreamResponse) error
}
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index 85fe7ae87..c478c0e1c 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -56,40 +56,3 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
return resp, s.sigSvc.SignResponse(resp, err)
}
-
-func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(container.ListStreamResponse)
- _ = s.sigSvc.SignResponse(resp, err)
- return stream.Send(resp)
- }
-
- ss := &listStreamSigner{
- ListStream: stream,
- sigSvc: s.sigSvc,
- }
- err := s.svc.ListStream(req, ss)
- if err != nil || !ss.nonEmptyResp {
- return ss.send(new(container.ListStreamResponse), err)
- }
- return nil
-}
-
-type listStreamSigner struct {
- ListStream
- sigSvc *util.SignService
-
- nonEmptyResp bool // set on first Send call
-}
-
-func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error {
- s.nonEmptyResp = true
- return s.send(resp, nil)
-}
-
-func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error {
- if err := s.sigSvc.SignResponse(resp, err); err != nil {
- return err
- }
- return s.ListStream.Send(resp)
-}
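
The removed `listStreamSigner` guaranteed two things: every streamed message leaves signed, and the client receives at least one signed response even when the handler fails or sends nothing. A sketch of that invariant with hypothetical `signer`, `stream`, and `response` types:

```go
package sketch

// response, signer, and stream are hypothetical stand-ins for the container
// response type, util.SignService, and the ListStream interface.
type response struct{ payload string }

type signer interface {
	Sign(resp *response, handlerErr error) error
}

type stream interface {
	Send(*response) error
}

// signingStream signs every outgoing message and records whether anything
// was sent, mirroring the deleted listStreamSigner.
type signingStream struct {
	next     stream
	sig      signer
	nonEmpty bool
}

func (s *signingStream) Send(r *response) error {
	s.nonEmpty = true
	if err := s.sig.Sign(r, nil); err != nil {
		return err
	}
	return s.next.Send(r)
}

// finish preserves the invariant: if the handler failed or sent nothing, the
// client still receives exactly one signed (possibly error) response.
func (s *signingStream) finish(handlerErr error) error {
	if handlerErr == nil && s.nonEmpty {
		return nil
	}
	r := new(response)
	if err := s.sig.Sign(r, handlerErr); err != nil {
		return err
	}
	return s.next.Send(r)
}
```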
diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go
deleted file mode 100644
index 4f8708da7..000000000
--- a/pkg/services/container/transport_splitter.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package container
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
-)
-
-type (
- TransportSplitter struct {
- next Server
-
- respSvc *response.Service
- cnrAmount uint32
- }
-
- listStreamMsgSizeCtrl struct {
- util.ServerStream
- stream ListStream
- respSvc *response.Service
- cnrAmount uint32
- }
-)
-
-func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server {
- return &TransportSplitter{
- next: next,
- respSvc: respSvc,
- cnrAmount: cnrAmount,
- }
-}
-
-func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
- return s.next.Put(ctx, req)
-}
-
-func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
- return s.next.Delete(ctx, req)
-}
-
-func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
- return s.next.Get(ctx, req)
-}
-
-func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
- return s.next.List(ctx, req)
-}
-
-func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- return s.next.ListStream(req, &listStreamMsgSizeCtrl{
- ServerStream: stream,
- stream: stream,
- respSvc: s.respSvc,
- cnrAmount: s.cnrAmount,
- })
-}
-
-func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error {
- s.respSvc.SetMeta(resp)
- body := resp.GetBody()
- ids := body.GetContainerIDs()
-
- var newResp *container.ListStreamResponse
-
- for {
- if newResp == nil {
- newResp = new(container.ListStreamResponse)
- newResp.SetBody(body)
- }
-
- cut := min(s.cnrAmount, uint32(len(ids)))
-
- body.SetContainerIDs(ids[:cut])
- newResp.SetMetaHeader(resp.GetMetaHeader())
- newResp.SetVerificationHeader(resp.GetVerificationHeader())
-
- if err := s.stream.Send(newResp); err != nil {
- return fmt.Errorf("TransportSplitter: %w", err)
- }
-
- ids = ids[cut:]
-
- if len(ids) == 0 {
- break
- }
- }
-
- return nil
-}
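
The deleted `listStreamMsgSizeCtrl.Send` sliced an oversized ID list into chunks of at most `cnrAmount` before forwarding each piece downstream. The core slicing, reduced to a pure function over strings (a sketch, not the original implementation):

```go
package sketch

// splitIDs reduces the deleted listStreamMsgSizeCtrl.Send to its core: break
// a full ID list into chunks of at most chunk items each.
func splitIDs(ids []string, chunk int) [][]string {
	if chunk <= 0 {
		return nil
	}
	var out [][]string
	for len(ids) > 0 {
		cut := chunk
		if len(ids) < cut {
			cut = len(ids)
		}
		out = append(out, ids[:cut])
		ids = ids[cut:]
	}
	return out
}
```

For example, `splitIDs([]string{"a", "b", "c"}, 2)` yields `[[a b] [c]]`.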
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
index d9f65a2fc..e54fa9824 100644
--- a/pkg/services/control/ir/server/audit.go
+++ b/pkg/services/control/ir/server/audit.go
@@ -36,7 +36,7 @@ func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheck
if !a.enabled.Load() {
return res, err
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
+ audit.LogRequestWithKey(a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
return res, err
}
@@ -79,7 +79,7 @@ func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveC
}
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
+ audit.LogRequestWithKey(a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
return res, err
}
@@ -90,7 +90,7 @@ func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRe
return res, err
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
return res, err
}
@@ -102,7 +102,7 @@ func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequ
return res, err
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
nil, err == nil)
return res, err
}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 0509d2646..63be22411 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -40,7 +40,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
// TickEpoch forces a new epoch.
//
// If request is not signed with a key from white list, permission error returns.
-func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -48,12 +48,12 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (
resp := new(control.TickEpochResponse)
resp.SetBody(new(control.TickEpochResponse_Body))
- epoch, err := s.netmapClient.Epoch(ctx)
+ epoch, err := s.netmapClient.Epoch()
if err != nil {
return nil, fmt.Errorf("getting current epoch: %w", err)
}
- vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
+ vub, err := s.netmapClient.NewEpochControl(epoch+1, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing new epoch: %w", err)
}
@@ -69,7 +69,7 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (
// RemoveNode forces a node removal.
//
// If request is not signed with a key from white list, permission error returns.
-func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -77,7 +77,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
resp := new(control.RemoveNodeResponse)
resp.SetBody(new(control.RemoveNodeResponse_Body))
- nm, err := s.netmapClient.NetMap(ctx)
+ nm, err := s.netmapClient.NetMap()
if err != nil {
return nil, fmt.Errorf("getting netmap: %w", err)
}
@@ -95,7 +95,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
return nil, status.Error(codes.FailedPrecondition, "node is already offline")
}
- vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
+ vub, err := s.netmapClient.ForceRemovePeer(nodeInfo, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing node removal: %w", err)
}
@@ -109,7 +109,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
}
// RemoveContainer forces a container removal.
-func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
+func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -124,7 +124,7 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain
return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
}
var err error
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -138,13 +138,13 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain
return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
}
- cids, err := s.containerClient.ContainersOf(ctx, &owner)
+ cids, err := s.containerClient.ContainersOf(&owner)
if err != nil {
return nil, fmt.Errorf("failed to get owner's containers: %w", err)
}
for _, containerID := range cids {
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -162,13 +162,13 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain
return resp, nil
}
-func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
+func (s *Server) removeContainer(containerID cid.ID, vub uint32) (uint32, error) {
var prm container.DeletePrm
prm.SetCID(containerID[:])
prm.SetControlTX(true)
prm.SetVUB(vub)
- vub, err := s.containerClient.Delete(ctx, prm)
+ vub, err := s.containerClient.Delete(prm)
if err != nil {
return 0, fmt.Errorf("forcing container removal: %w", err)
}
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
index 0cfca71c1..c2a4f88a6 100644
--- a/pkg/services/control/ir/server/server.go
+++ b/pkg/services/control/ir/server/server.go
@@ -35,7 +35,8 @@ func panicOnPrmValue(n string, v any) {
// the parameterized private key.
func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server {
// verify required parameters
- if prm.healthChecker == nil {
+ switch {
+ case prm.healthChecker == nil:
panicOnPrmValue("health checker", prm.healthChecker)
}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 0c4236d0e..514061db4 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -1,8 +1,6 @@
package control
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
)
@@ -17,6 +15,7 @@ const (
rpcListShards = "ListShards"
rpcSetShardMode = "SetShardMode"
rpcSynchronizeTree = "SynchronizeTree"
+ rpcEvacuateShard = "EvacuateShard"
rpcStartShardEvacuation = "StartShardEvacuation"
rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus"
@@ -32,7 +31,6 @@ const (
rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
rpcDetachShards = "DetachShards"
rpcStartShardRebuild = "StartShardRebuild"
- rpcListShardsForObject = "ListShardsForObject"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -76,7 +74,6 @@ func SetNetmapStatus(
// GetNetmapStatus executes ControlService.GetNetmapStatus RPC.
func GetNetmapStatus(
- _ context.Context,
cli *client.Client,
req *GetNetmapStatusRequest,
opts ...client.CallOption,
@@ -165,6 +162,19 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl
return wResp.message, nil
}
+// EvacuateShard executes ControlService.EvacuateShard RPC.
+func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) {
+ wResp := newResponseWrapper[EvacuateShardResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
// StartShardEvacuation executes ControlService.StartShardEvacuation RPC.
func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) {
wResp := newResponseWrapper[StartShardEvacuationResponse]()
@@ -365,22 +375,3 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts .
return wResp.message, nil
}
-
-// ListShardsForObject executes ControlService.ListShardsForObject RPC.
-func ListShardsForObject(
- cli *client.Client,
- req *ListShardsForObjectRequest,
- opts ...client.CallOption,
-) (*ListShardsForObjectResponse, error) {
- wResp := newResponseWrapper[ListShardsForObjectResponse]()
-
- wReq := &requestWrapper{
- m: req,
- }
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go
index ffd36962b..a4111bddb 100644
--- a/pkg/services/control/server/detach_shards.go
+++ b/pkg/services/control/server/detach_shards.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
+func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -19,7 +19,7 @@ func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequ
shardIDs := s.getShardIDList(req.GetBody().GetShard_ID())
- if err := s.s.DetachShards(ctx, shardIDs); err != nil {
+ if err := s.s.DetachShards(shardIDs); err != nil {
if errors.As(err, new(logicerr.Logical)) {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
new file mode 100644
index 000000000..ae3413373
--- /dev/null
+++ b/pkg/services/control/server/evacuate.go
@@ -0,0 +1,188 @@
+package control
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
+
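+// EvacuateShard synchronously evacuates objects from the listed shards,
+// replicating each object to other container nodes. Deprecated in favor of
+// the asynchronous StartShardEvacuation flow (see service.proto).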
+func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ prm := engine.EvacuateShardPrm{
+ ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ ObjectsHandler: s.replicateObject,
+ Scope: engine.EvacuateScopeObjects,
+ }
+
+ res, err := s.s.Evacuate(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.EvacuateShardResponse{
+ Body: &control.EvacuateShardResponse_Body{
+ Count: uint32(res.ObjectsEvacuated()),
+ },
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
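+// replicateObject creates one additional copy of the object on another
+// container node and reports whether replication actually happened.
+// Objects without a container ID are skipped without an error.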
+func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ cid, ok := obj.ContainerID()
+ if !ok {
+ // Return (false, nil) to prevent situations where a shard can't be
+ // evacuated because of a single bad/corrupted object.
+ return false, nil
+ }
+
+ nodes, err := s.getContainerNodes(cid)
+ if err != nil {
+ return false, err
+ }
+
+ if len(nodes) == 0 {
+ return false, nil
+ }
+
+ var res replicatorResult
+ task := replicator.Task{
+ NumCopies: 1,
+ Addr: addr,
+ Obj: obj,
+ Nodes: nodes,
+ }
+ s.replicator.HandleReplicationTask(ctx, task, &res)
+
+ if res.count == 0 {
+ return false, errors.New("object was not replicated")
+ }
+ return true, nil
+}
+
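+// replicateTree pushes the tree's full operation log to container nodes,
+// stopping at the first node that accepts it; on success it returns that
+// node's public key in hex.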
+func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
+ nodes, err := s.getContainerNodes(contID)
+ if err != nil {
+ return false, "", err
+ }
+ if len(nodes) == 0 {
+ return false, "", nil
+ }
+
+ for _, node := range nodes {
+ err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
+ if err == nil {
+ return true, hex.EncodeToString(node.PublicKey()), nil
+ }
+ }
+ return false, "", err
+}
+
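+// replicateTreeToNode replays the tree operation log to a single node:
+// each logged operation is wrapped into a signed ApplyRequest, and the loop
+// ends once the log is exhausted (op.Time == 0).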
+func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
+ rawCID := make([]byte, sha256.Size)
+ contID.Encode(rawCID)
+
+ var height uint64
+ for {
+ op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
+ if err != nil {
+ return err
+ }
+
+ if op.Time == 0 {
+ return nil
+ }
+
+ req := &tree.ApplyRequest{
+ Body: &tree.ApplyRequest_Body{
+ ContainerId: rawCID,
+ TreeId: treeID,
+ Operation: &tree.LogMove{
+ ParentId: op.Parent,
+ Meta: op.Meta.Bytes(),
+ ChildId: op.Child,
+ },
+ },
+ }
+
+ err = tree.SignMessage(req, s.key)
+ if err != nil {
+ return fmt.Errorf("can't message apply request: %w", err)
+ }
+
+ err = s.treeService.ReplicateTreeOp(ctx, node, req)
+ if err != nil {
+ return err
+ }
+
+ height = op.Time + 1
+ }
+}
+
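+// getContainerNodes resolves the container's placement policy against the
+// current netmap and returns the flattened node list with the local node
+// filtered out.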
+func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
+ nm, err := s.netMapSrc.GetNetMap(0)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := s.cnrSrc.Get(contID)
+ if err != nil {
+ return nil, err
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ contID.Encode(binCnr)
+
+ ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
+ if err != nil {
+ return nil, errFailedToBuildListOfContainerNodes
+ }
+
+ nodes := placement.FlattenNodes(ns)
+ bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
+ for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
+ if bytes.Equal(nodes[i].PublicKey(), bs) {
+ copy(nodes[i:], nodes[i+1:])
+ nodes = nodes[:len(nodes)-1]
+ }
+ }
+ return nodes, nil
+}
+
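+// replicatorResult counts successful replications reported by the replicator.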
+type replicatorResult struct {
+ count int
+}
+
+// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
+ r.count++
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index f3ba9015e..146ac7e16 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -1,32 +1,17 @@
package control
import (
- "bytes"
"context"
- "crypto/sha256"
- "encoding/hex"
"errors"
- "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
-var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
-
func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) {
err := s.isValidRequest(req)
if err != nil {
@@ -42,13 +27,15 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
IgnoreErrors: req.GetBody().GetIgnoreErrors(),
ObjectsHandler: s.replicateObject,
TreeHandler: s.replicateTree,
+ Async: true,
Scope: engine.EvacuateScope(req.GetBody().GetScope()),
ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
RepOneOnly: req.GetBody().GetRepOneOnly(),
}
- if err = s.s.Evacuate(ctx, prm); err != nil {
+ _, err = s.s.Evacuate(ctx, prm)
+ if err != nil {
var logicalErr logicerr.Logical
if errors.As(err, &logicalErr) {
return nil, status.Error(codes.Aborted, err.Error())
@@ -148,133 +135,3 @@ func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.Re
}
return resp, nil
}
-
-func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- cid, ok := obj.ContainerID()
- if !ok {
- // Return nil to prevent situations where a shard can't be evacuated
- // because of a single bad/corrupted object.
- return false, nil
- }
-
- nodes, err := s.getContainerNodes(ctx, cid)
- if err != nil {
- return false, err
- }
-
- if len(nodes) == 0 {
- return false, nil
- }
-
- var res replicatorResult
- task := replicator.Task{
- NumCopies: 1,
- Addr: addr,
- Obj: obj,
- Nodes: nodes,
- }
- s.replicator.HandleReplicationTask(ctx, task, &res)
-
- if res.count == 0 {
- return false, errors.New("object was not replicated")
- }
- return true, nil
-}
-
-func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
- nodes, err := s.getContainerNodes(ctx, contID)
- if err != nil {
- return false, "", err
- }
- if len(nodes) == 0 {
- return false, "", nil
- }
-
- for _, node := range nodes {
- err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
- if err == nil {
- return true, hex.EncodeToString(node.PublicKey()), nil
- }
- }
- return false, "", err
-}
-
-func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
- rawCID := make([]byte, sha256.Size)
- contID.Encode(rawCID)
-
- var height uint64
- for {
- op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
- if err != nil {
- return err
- }
-
- if op.Time == 0 {
- return nil
- }
-
- req := &tree.ApplyRequest{
- Body: &tree.ApplyRequest_Body{
- ContainerId: rawCID,
- TreeId: treeID,
- Operation: &tree.LogMove{
- ParentId: op.Parent,
- Meta: op.Bytes(),
- ChildId: op.Child,
- },
- },
- }
-
- err = tree.SignMessage(req, s.key)
- if err != nil {
- return fmt.Errorf("can't message apply request: %w", err)
- }
-
- err = s.treeService.ReplicateTreeOp(ctx, node, req)
- if err != nil {
- return err
- }
-
- height = op.Time + 1
- }
-}
-
-func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) {
- nm, err := s.netMapSrc.GetNetMap(ctx, 0)
- if err != nil {
- return nil, err
- }
-
- c, err := s.cnrSrc.Get(ctx, contID)
- if err != nil {
- return nil, err
- }
-
- binCnr := make([]byte, sha256.Size)
- contID.Encode(binCnr)
-
- ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
- if err != nil {
- return nil, errFailedToBuildListOfContainerNodes
- }
-
- nodes := placement.FlattenNodes(ns)
- bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
- if bytes.Equal(nodes[i].PublicKey(), bs) {
- copy(nodes[i:], nodes[i+1:])
- nodes = nodes[:len(nodes)-1]
- }
- }
- return nodes, nil
-}
-
-type replicatorResult struct {
- count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
- r.count++
-}
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index a8ef7809e..d9fefc38e 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -42,7 +42,8 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
prm.WithForceRemoval()
prm.WithAddress(addrList[i])
- if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
+ _, err := s.s.Delete(ctx, prm)
+ if err != nil && firstErr == nil {
firstErr = err
}
}
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
index 5e0496910..1c038253a 100644
--- a/pkg/services/control/server/get_netmap_status.go
+++ b/pkg/services/control/server/get_netmap_status.go
@@ -10,12 +10,12 @@ import (
)
// GetNetmapStatus gets node status in FrostFS network.
-func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
+func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
- st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
+ st, epoch, err := s.nodeState.GetNetmapStatus()
if err != nil {
return nil, err
}
diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go
deleted file mode 100644
index 39565ed50..000000000
--- a/pkg/services/control/server/list_shards_for_object.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- var obj oid.ID
- err = obj.DecodeString(req.GetBody().GetObjectId())
- if err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
-
- var cnr cid.ID
- err = cnr.DecodeString(req.GetBody().GetContainerId())
- if err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
-
- resp := new(control.ListShardsForObjectResponse)
- body := new(control.ListShardsForObjectResponse_Body)
- resp.SetBody(body)
-
- var objAddr oid.Address
- objAddr.SetContainer(cnr)
- objAddr.SetObject(obj)
- info, err := s.s.ListShardsForObject(ctx, objAddr)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- if len(info) == 0 {
- return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject)
- }
-
- body.SetShard_ID(shardInfoToProto(info))
-
- // Sign the response
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func shardInfoToProto(infos []shard.Info) [][]byte {
- shardInfos := make([][]byte, 0, len(infos))
- for _, info := range infos {
- shardInfos = append(shardInfos, *info.ID)
- }
-
- return shardInfos
-}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index 59d701bc6..b6fdcb246 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -1,7 +1,6 @@
package control
import (
- "context"
"crypto/ecdsa"
"sync/atomic"
@@ -46,13 +45,13 @@ type NodeState interface {
//
// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
// in the network settings, the node additionally starts local maintenance.
- SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
+ SetNetmapStatus(st control.NetmapStatus) error
// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
// but starts local maintenance regardless of the network settings.
- ForceMaintenance(ctx context.Context) error
+ ForceMaintenance() error
- GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
+ GetNetmapStatus() (control.NetmapStatus, uint64, error)
}
// LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
index 529041dca..3fd69df12 100644
--- a/pkg/services/control/server/set_netmap_status.go
+++ b/pkg/services/control/server/set_netmap_status.go
@@ -12,7 +12,7 @@ import (
// SetNetmapStatus sets node status in FrostFS network.
//
// If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
+func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
// verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -29,9 +29,9 @@ func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStat
"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
}
- err = s.nodeState.ForceMaintenance(ctx)
+ err = s.nodeState.ForceMaintenance()
} else {
- err = s.nodeState.SetNetmapStatus(ctx, st)
+ err = s.nodeState.SetNetmapStatus(st)
}
if err != nil {
diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go
index 4f8796263..52835c41d 100644
--- a/pkg/services/control/server/set_shard_mode.go
+++ b/pkg/services/control/server/set_shard_mode.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
+func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
// verify request
err := s.isValidRequest(req)
if err != nil {
@@ -38,7 +38,7 @@ func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequ
}
for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
- err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter())
+ err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 4c539acfc..ae1939e13 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -30,6 +30,11 @@ service ControlService {
// Synchronizes all log operations for the specified tree.
rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
+ // EvacuateShard moves all data from one shard to the others.
+ // Deprecated: Use
+ // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
+ rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse);
+
// StartShardEvacuation starts moving all data from one shard to the others.
rpc StartShardEvacuation(StartShardEvacuationRequest)
returns (StartShardEvacuationResponse);
@@ -89,9 +94,6 @@ service ControlService {
// StartShardRebuild starts shard rebuild process.
rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
-
- // ListShardsForObject returns shard info where object is stored.
- rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
}
// Health check request.
@@ -732,23 +734,3 @@ message StartShardRebuildResponse {
Signature signature = 2;
}
-
-message ListShardsForObjectRequest {
- message Body {
- string object_id = 1;
- string container_id = 2;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-message ListShardsForObjectResponse {
- message Body {
- // List of the node's shards storing object.
- repeated bytes shard_ID = 1;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 44849d591..0b4e3cf32 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -17303,727 +17303,3 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
in.Consumed()
}
}
-
-type ListShardsForObjectRequest_Body struct {
- ObjectId string `json:"objectId"`
- ContainerId string `json:"containerId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.StringSize(1, x.ObjectId)
- size += proto.StringSize(2, x.ContainerId)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.ObjectId) != 0 {
- mm.AppendString(1, x.ObjectId)
- }
- if len(x.ContainerId) != 0 {
- mm.AppendString(2, x.ContainerId)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ObjectId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ObjectId")
- }
- x.ObjectId = data
- case 2: // ContainerId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- }
- }
- return nil
-}
-func (x *ListShardsForObjectRequest_Body) GetObjectId() string {
- if x != nil {
- return x.ObjectId
- }
- return ""
-}
-func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) {
- x.ObjectId = v
-}
-func (x *ListShardsForObjectRequest_Body) GetContainerId() string {
- if x != nil {
- return x.ContainerId
- }
- return ""
-}
-func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) {
- x.ContainerId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"objectId\":"
- out.RawString(prefix)
- out.String(x.ObjectId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- out.String(x.ContainerId)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "objectId":
- {
- var f string
- f = in.String()
- x.ObjectId = f
- }
- case "containerId":
- {
- var f string
- f = in.String()
- x.ContainerId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectRequest struct {
- Body *ListShardsForObjectRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil)
- _ json.Marshaler = (*ListShardsForObjectRequest)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsForObjectRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) {
- x.Body = v
-}
-func (x *ListShardsForObjectRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsForObjectRequest_Body
- f = new(ListShardsForObjectRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectResponse_Body struct {
- Shard_ID [][]byte `json:"shardID"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.RepeatedBytesSize(1, x.Shard_ID)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- }
- }
- return nil
-}
-func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectResponse struct {
- Body *ListShardsForObjectResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
- _ json.Marshaler = (*ListShardsForObjectResponse)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsForObjectResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) {
- x.Body = v
-}
-func (x *ListShardsForObjectResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsForObjectResponse_Body
- f = new(ListShardsForObjectResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index 045662ccf..f5cfefa85 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -26,6 +26,7 @@ const (
ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
+ ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation"
ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus"
ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus"
@@ -41,7 +42,6 @@ const (
ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
- ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject"
)
// ControlServiceClient is the client API for ControlService service.
@@ -62,6 +62,10 @@ type ControlServiceClient interface {
SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error)
+ // EvacuateShard moves all data from one shard to the others.
+ // Deprecated: Use
+ // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
+ EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -96,8 +100,6 @@ type ControlServiceClient interface {
DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
- // ListShardsForObject returns shard info where object is stored.
- ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error)
}
type controlServiceClient struct {
@@ -171,6 +173,15 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron
return out, nil
}
+func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
+ out := new(EvacuateShardResponse)
+ err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) {
out := new(StartShardEvacuationResponse)
err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...)
@@ -306,15 +317,6 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS
return out, nil
}
-func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) {
- out := new(ListShardsForObjectResponse)
- err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
// ControlServiceServer is the server API for ControlService service.
// All implementations should embed UnimplementedControlServiceServer
// for forward compatibility
@@ -333,6 +335,10 @@ type ControlServiceServer interface {
SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error)
+ // EvacuateShard moves all data from one shard to the others.
+ // Deprecated: Use
+ // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
+ EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -367,8 +373,6 @@ type ControlServiceServer interface {
DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
- // ListShardsForObject returns shard info where object is stored.
- ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -396,6 +400,9 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard
func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
}
+func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented")
+}
func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented")
}
@@ -441,9 +448,6 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh
func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
}
-func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented")
-}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -582,6 +586,24 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EvacuateShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).EvacuateShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_EvacuateShard_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartShardEvacuationRequest)
if err := dec(in); err != nil {
@@ -852,24 +874,6 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
-func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListShardsForObjectRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).ListShardsForObject(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_ListShardsForObject_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -905,6 +909,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SynchronizeTree",
Handler: _ControlService_SynchronizeTree_Handler,
},
+ {
+ MethodName: "EvacuateShard",
+ Handler: _ControlService_EvacuateShard_Handler,
+ },
{
MethodName: "StartShardEvacuation",
Handler: _ControlService_StartShardEvacuation_Handler,
@@ -965,10 +973,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "StartShardRebuild",
Handler: _ControlService_StartShardRebuild_Handler,
},
- {
- MethodName: "ListShardsForObject",
- Handler: _ControlService_ListShardsForObject_Handler,
- },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 1b92fdaad..5223047df 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
@@ -43,16 +42,14 @@ type NetworkInfo interface {
// Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
- Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+ Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
}
func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
- // this should never happen, otherwise it's a programmer's bug
- msg := "BUG: can't create netmap execution service"
- assert.False(s == nil, msg, "node state is nil")
- assert.False(netInfo == nil, msg, "network info is nil")
- assert.False(respSvc == nil, msg, "response service is nil")
- assert.True(version.IsValid(v), msg, "invalid version")
+ if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil {
+ // this should never happen, otherwise it's a programmer's bug
+ panic("can't create netmap execution service")
+ }
res := &executorSvc{
state: s,
@@ -85,7 +82,7 @@ func (s *executorSvc) LocalNodeInfo(
}
func (s *executorSvc) NetworkInfo(
- ctx context.Context,
+ _ context.Context,
req *netmap.NetworkInfoRequest,
) (*netmap.NetworkInfoResponse, error) {
verV2 := req.GetMetaHeader().GetVersion()
@@ -98,7 +95,7 @@ func (s *executorSvc) NetworkInfo(
return nil, fmt.Errorf("can't read version: %w", err)
}
- ni, err := s.netInfo.Dump(ctx, ver)
+ ni, err := s.netInfo.Dump(ver)
if err != nil {
return nil, err
}
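With the context parameter dropped from Dump, an implementation of NetworkInfo reduces to a plain snapshot accessor. A hypothetical conforming type, using this file's import aliases (cachedNetInfo and its field are illustrative only):

type cachedNetInfo struct {
	recent netmapSDK.NetworkInfo // latest snapshot, refreshed elsewhere
}

// Dump returns the cached snapshot; per the contract above, callers targeting
// protocol <=2.9 must not rely on MillisecondsPerBlock or the network config.
func (c *cachedNetInfo) Dump(_ versionsdk.Version) (*netmapSDK.NetworkInfo, error) {
	ni := c.recent
	return &ni, nil
}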
diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go
new file mode 100644
index 000000000..921545c8b
--- /dev/null
+++ b/pkg/services/object/acl/acl.go
@@ -0,0 +1,262 @@
+package acl
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+ "io"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
+ bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// Checker implements the v2.ACLChecker interface and provides
+// ACL/eACL validation functionality.
+type Checker struct {
+ eaclSrc container.EACLSource
+ validator *eaclSDK.Validator
+ localStorage *engine.StorageEngine
+ state netmap.State
+}
+
+type localStorage struct {
+ ls *engine.StorageEngine
+}
+
+func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ if s.ls == nil {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ return engine.Head(ctx, s.ls, addr)
+}
+
+// Various EACL check errors.
+var (
+ errEACLDeniedByRule = errors.New("denied by rule")
+ errBearerExpired = errors.New("bearer token has expired")
+ errBearerInvalidSignature = errors.New("bearer token has invalid signature")
+ errBearerInvalidContainerID = errors.New("bearer token was created for another container")
+ errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
+ errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
+)
+
+// NewChecker creates Checker.
+// Panics if at least one of the parameters is nil.
+func NewChecker(
+ state netmap.State,
+ eaclSrc container.EACLSource,
+ validator *eaclSDK.Validator,
+ localStorage *engine.StorageEngine,
+) *Checker {
+ return &Checker{
+ eaclSrc: eaclSrc,
+ validator: validator,
+ localStorage: localStorage,
+ state: state,
+ }
+}
+
+// CheckBasicACL is the main check function for basic ACL.
+func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool {
+ // check basic ACL permissions
+ return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole())
+}
+
+// StickyBitCheck validates owner field in the request if sticky bit is enabled.
+func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool {
+ // According to the FrostFS specification, the sticky bit has no effect on
+ // system nodes: this is required for correct intra-container work with
+ // objects (in particular, replication).
+ if info.RequestRole() == acl.RoleContainer {
+ return true
+ }
+
+ if !info.BasicACL().Sticky() {
+ return true
+ }
+
+ if len(info.SenderKey()) == 0 {
+ return false
+ }
+
+ requestSenderKey := unmarshalPublicKey(info.SenderKey())
+
+ return isOwnerFromKey(owner, requestSenderKey)
+}
+
+// CheckEACL is the main check function for extended ACL.
+func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
+ basicACL := reqInfo.BasicACL()
+ if !basicACL.Extendable() {
+ return nil
+ }
+
+ bearerTok := reqInfo.Bearer()
+ impersonate := bearerTok != nil && bearerTok.Impersonate()
+
+ // if bearer token is not allowed, then ignore it
+ if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) {
+ reqInfo.CleanBearer()
+ }
+
+ var table eaclSDK.Table
+ cnr := reqInfo.ContainerID()
+
+ if bearerTok == nil {
+ eaclInfo, err := c.eaclSrc.GetEACL(cnr)
+ if err != nil {
+ if client.IsErrEACLNotFound(err) {
+ return nil
+ }
+ return err
+ }
+
+ table = *eaclInfo.Value
+ } else {
+ table = bearerTok.EACLTable()
+ }
+
+ // if the bearer token is not present, isValidBearer returns nil
+ if err := isValidBearer(reqInfo, c.state); err != nil {
+ return err
+ }
+
+ hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo)
+ if err != nil {
+ return err
+ }
+
+ eaclRole := getRole(reqInfo)
+
+ action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
+ WithRole(eaclRole).
+ WithOperation(eaclSDK.Operation(reqInfo.Operation())).
+ WithContainerID(&cnr).
+ WithSenderKey(reqInfo.SenderKey()).
+ WithHeaderSource(hdrSrc).
+ WithEACLTable(&table),
+ )
+
+ if action != eaclSDK.ActionAllow {
+ return errEACLDeniedByRule
+ }
+ return nil
+}
+
+func getRole(reqInfo v2.RequestInfo) eaclSDK.Role {
+ var eaclRole eaclSDK.Role
+ switch op := reqInfo.RequestRole(); op {
+ default:
+ eaclRole = eaclSDK.Role(op)
+ case acl.RoleOwner:
+ eaclRole = eaclSDK.RoleUser
+ case acl.RoleInnerRing, acl.RoleContainer:
+ eaclRole = eaclSDK.RoleSystem
+ case acl.RoleOthers:
+ eaclRole = eaclSDK.RoleOthers
+ }
+ return eaclRole
+}
+
+func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) {
+ var xHeaderSource eaclV2.XHeaderSource
+ if req, ok := msg.(eaclV2.Request); ok {
+ xHeaderSource = eaclV2.NewRequestXHeaderSource(req)
+ } else {
+ xHeaderSource = eaclV2.NewResponseXHeaderSource(msg.(eaclV2.Response), reqInfo.Request().(eaclV2.Request))
+ }
+
+ hdrSrc, err := eaclV2.NewMessageHeaderSource(&localStorage{ls: c.localStorage}, xHeaderSource, cnr, eaclV2.WithOID(reqInfo.ObjectID()))
+ if err != nil {
+ return nil, fmt.Errorf("can't parse headers: %w", err)
+ }
+ return hdrSrc, nil
+}
+
+// isValidBearer checks whether the bearer token was correctly signed by an
+// authorized entity. This method is defined on the whole ACL service because
+// it requires fetching the current epoch to check the token lifetime.
+func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error {
+ ownerCnr := reqInfo.ContainerOwner()
+
+ token := reqInfo.Bearer()
+
+ // 0. Check if bearer token is present in reqInfo.
+ if token == nil {
+ return nil
+ }
+
+ // 1. First check token lifetime. Simplest verification.
+ if token.InvalidAt(st.CurrentEpoch()) {
+ return errBearerExpired
+ }
+
+ // 2. Then check if bearer token is signed correctly.
+ if !token.VerifySignature() {
+ return errBearerInvalidSignature
+ }
+
+ // 3. Then check if container is either empty or equal to the container in the request.
+ cnr, isSet := token.EACLTable().CID()
+ if isSet && !cnr.Equals(reqInfo.ContainerID()) {
+ return errBearerInvalidContainerID
+ }
+
+ // 4. Then check if container owner signed this token.
+ if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) {
+ // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
+ return errBearerNotSignedByOwner
+ }
+
+ // 5. Then check if request sender has rights to use this token.
+ var keySender frostfsecdsa.PublicKey
+
+ err := keySender.Decode(reqInfo.SenderKey())
+ if err != nil {
+ return fmt.Errorf("decode sender public key: %w", err)
+ }
+
+ var usrSender user.ID
+ user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender))
+
+ if !token.AssertUser(usrSender) {
+ // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
+ return errBearerInvalidOwner
+ }
+
+ return nil
+}
+
+func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
+ if key == nil {
+ return false
+ }
+
+ var id2 user.ID
+ user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
+
+ return id.Equals(id2)
+}
+
+func unmarshalPublicKey(bs []byte) *keys.PublicKey {
+ pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256())
+ if err != nil {
+ return nil
+ }
+ return pub
+}
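The three checks above are normally applied in sequence by the v2 service that appears later in this diff. A minimal in-package sketch of a caller; the error value is a placeholder, not the service's real access-denied status:

func authorize(ch *Checker, request any, reqInfo v2.RequestInfo, owner user.ID) error {
	// basic ACL and sticky bit are boolean gates; eACL returns a typed error
	if !ch.CheckBasicACL(reqInfo) || !ch.StickyBitCheck(reqInfo, owner) {
		return errors.New("denied by basic ACL") // placeholder
	}
	return ch.CheckEACL(request, reqInfo)
}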
diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go
new file mode 100644
index 000000000..d63cb1285
--- /dev/null
+++ b/pkg/services/object/acl/acl_test.go
@@ -0,0 +1,89 @@
+package acl
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "github.com/stretchr/testify/require"
+)
+
+type emptyEACLSource struct{}
+
+func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) {
+ return nil, nil
+}
+
+type emptyNetmapState struct{}
+
+func (e emptyNetmapState) CurrentEpoch() uint64 {
+ return 0
+}
+
+func TestStickyCheck(t *testing.T) {
+ checker := NewChecker(
+ emptyNetmapState{},
+ emptyEACLSource{},
+ eaclSDK.NewValidator(),
+ &engine.StorageEngine{})
+
+ t.Run("system role", func(t *testing.T) {
+ var info v2.RequestInfo
+
+ info.SetSenderKey(make([]byte, 33)) // any non-empty key
+ info.SetRequestRole(acl.RoleContainer)
+
+ require.True(t, checker.StickyBitCheck(info, usertest.ID()))
+
+ var basicACL acl.Basic
+ basicACL.MakeSticky()
+
+ info.SetBasicACL(basicACL)
+
+ require.True(t, checker.StickyBitCheck(info, usertest.ID()))
+ })
+
+ t.Run("owner ID and/or public key emptiness", func(t *testing.T) {
+ var info v2.RequestInfo
+
+ info.SetRequestRole(acl.RoleOthers) // should be non-system role
+
+ assertFn := func(isSticky, withKey, withOwner, expected bool) {
+ info := info
+ if isSticky {
+ var basicACL acl.Basic
+ basicACL.MakeSticky()
+
+ info.SetBasicACL(basicACL)
+ }
+
+ if withKey {
+ info.SetSenderKey(make([]byte, 33))
+ } else {
+ info.SetSenderKey(nil)
+ }
+
+ var ownerID user.ID
+
+ if withOwner {
+ ownerID = usertest.ID()
+ }
+
+ require.Equal(t, expected, checker.StickyBitCheck(info, ownerID))
+ }
+
+ assertFn(true, false, false, false)
+ assertFn(true, true, false, false)
+ assertFn(true, false, true, false)
+ assertFn(false, false, false, true)
+ assertFn(false, true, false, true)
+ assertFn(false, false, true, true)
+ assertFn(false, true, true, true)
+ })
+}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
new file mode 100644
index 000000000..94e015abe
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/eacl_test.go
@@ -0,0 +1,166 @@
+package v2
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "testing"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+type testLocalStorage struct {
+ t *testing.T
+
+ expAddr oid.Address
+
+ obj *objectSDK.Object
+
+ err error
+}
+
+func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
+ require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
+
+ return s.obj, s.err
+}
+
+func testXHeaders(strs ...string) []session.XHeader {
+ res := make([]session.XHeader, len(strs)/2)
+
+ for i := 0; i < len(strs); i += 2 {
+ res[i/2].SetKey(strs[i])
+ res[i/2].SetValue(strs[i+1])
+ }
+
+ return res
+}
+
+func TestHeadRequest(t *testing.T) {
+ req := new(objectV2.HeadRequest)
+
+ meta := new(session.RequestMetaHeader)
+ req.SetMetaHeader(meta)
+
+ body := new(objectV2.HeadRequestBody)
+ req.SetBody(body)
+
+ addr := oidtest.Address()
+
+ var addrV2 refs.Address
+ addr.WriteToV2(&addrV2)
+
+ body.SetAddress(&addrV2)
+
+ xKey := "x-key"
+ xVal := "x-val"
+ xHdrs := testXHeaders(
+ xKey, xVal,
+ )
+
+ meta.SetXHeaders(xHdrs)
+
+ obj := objectSDK.New()
+
+ attrKey := "attr_key"
+ attrVal := "attr_val"
+ var attr objectSDK.Attribute
+ attr.SetKey(attrKey)
+ attr.SetValue(attrVal)
+ obj.SetAttributes(attr)
+
+ table := new(eaclSDK.Table)
+
+ priv, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ senderKey := priv.PublicKey()
+
+ r := eaclSDK.NewRecord()
+ r.SetOperation(eaclSDK.OperationHead)
+ r.SetAction(eaclSDK.ActionDeny)
+ r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
+ r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
+ eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
+
+ table.AddRecord(r)
+
+ lStorage := &testLocalStorage{
+ t: t,
+ expAddr: addr,
+ obj: obj,
+ }
+
+ id := addr.Object()
+
+ newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
+ hdrSrc, err := NewMessageHeaderSource(
+ lStorage,
+ NewRequestXHeaderSource(req),
+ addr.Container(),
+ WithOID(&id))
+ require.NoError(t, err)
+ return hdrSrc
+ }
+
+ cnr := addr.Container()
+
+ unit := new(eaclSDK.ValidationUnit).
+ WithContainerID(&cnr).
+ WithOperation(eaclSDK.OperationHead).
+ WithSenderKey(senderKey.Bytes()).
+ WithEACLTable(table)
+
+ validator := eaclSDK.NewValidator()
+
+ checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
+
+ meta.SetXHeaders(nil)
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ meta.SetXHeaders(xHdrs)
+
+ obj.SetAttributes()
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ lStorage.err = errors.New("any error")
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ r.SetAction(eaclSDK.ActionAllow)
+
+ rID := eaclSDK.NewRecord()
+ rID.SetOperation(eaclSDK.OperationHead)
+ rID.SetAction(eaclSDK.ActionDeny)
+ rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
+ eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
+
+ table = eaclSDK.NewTable()
+ table.AddRecord(r)
+ table.AddRecord(rID)
+
+ unit.WithEACLTable(table)
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+}
+
+func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
+ actual, fromRule := v.CalculateAction(u)
+ require.True(t, fromRule)
+ require.Equal(t, expected, actual)
+}
+
+func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
+ actual, fromRule := v.CalculateAction(u)
+ require.False(t, fromRule)
+ require.Equal(t, eaclSDK.ActionAllow, actual)
+}
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
new file mode 100644
index 000000000..ecb793df8
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/headers.go
@@ -0,0 +1,246 @@
+package v2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+type Option func(*cfg)
+
+type cfg struct {
+ storage ObjectStorage
+
+ msg XHeaderSource
+
+ cnr cid.ID
+ obj *oid.ID
+}
+
+type ObjectStorage interface {
+ Head(context.Context, oid.Address) (*objectSDK.Object, error)
+}
+
+type Request interface {
+ GetMetaHeader() *session.RequestMetaHeader
+}
+
+type Response interface {
+ GetMetaHeader() *session.ResponseMetaHeader
+}
+
+type headerSource struct {
+ requestHeaders []eaclSDK.Header
+ objectHeaders []eaclSDK.Header
+
+ incompleteObjectHeaders bool
+}
+
+func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) {
+ cfg := &cfg{
+ storage: os,
+ cnr: cnrID,
+ msg: xhs,
+ }
+
+ for i := range opts {
+ opts[i](cfg)
+ }
+
+ if cfg.msg == nil {
+ return nil, errors.New("message is not provided")
+ }
+
+ var res headerSource
+
+ err := cfg.readObjectHeaders(&res)
+ if err != nil {
+ return nil, err
+ }
+
+ res.requestHeaders = cfg.msg.GetXHeaders()
+
+ return res, nil
+}
+
+func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
+ switch typ {
+ default:
+ return nil, true
+ case eaclSDK.HeaderFromRequest:
+ return h.requestHeaders, true
+ case eaclSDK.HeaderFromObject:
+ return h.objectHeaders, !h.incompleteObjectHeaders
+ }
+}
+
+type xHeader session.XHeader
+
+func (x xHeader) Key() string {
+ return (*session.XHeader)(&x).GetKey()
+}
+
+func (x xHeader) Value() string {
+ return (*session.XHeader)(&x).GetValue()
+}
+
+var errMissingOID = errors.New("object ID is missing")
+
+func (h *cfg) readObjectHeaders(dst *headerSource) error {
+ switch m := h.msg.(type) {
+ default:
+ panic(fmt.Sprintf("unexpected message type %T", h.msg))
+ case requestXHeaderSource:
+ return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
+ case responseXHeaderSource:
+ return h.readObjectHeadersResponseXHeaderSource(m, dst)
+ }
+}
+
+func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
+ switch req := m.req.(type) {
+ case
+ *objectV2.GetRequest,
+ *objectV2.HeadRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
+
+ objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objHeaders
+ dst.incompleteObjectHeaders = !completed
+ case
+ *objectV2.GetRangeRequest,
+ *objectV2.GetRangeHashRequest,
+ *objectV2.DeleteRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
+
+ dst.objectHeaders = addressHeaders(h.cnr, h.obj)
+ case *objectV2.PutRequest:
+ if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
+ oV2 := new(objectV2.Object)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ case *objectV2.PutSingleRequest:
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj)
+ case *objectV2.SearchRequest:
+ cnrV2 := req.GetBody().GetContainerID()
+ var cnr cid.ID
+
+ if cnrV2 != nil {
+ if err := cnr.ReadFromV2(*cnrV2); err != nil {
+ return fmt.Errorf("can't parse container ID: %w", err)
+ }
+ }
+
+ dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
+ }
+ return nil
+}
+
+func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
+ switch resp := m.resp.(type) {
+ default:
+ objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objectHeaders
+ dst.incompleteObjectHeaders = !completed
+ case *objectV2.GetResponse:
+ if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ oV2 := new(objectV2.Object)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ case *objectV2.HeadResponse:
+ oV2 := new(objectV2.Object)
+
+ var hdr *objectV2.Header
+
+ switch v := resp.GetBody().GetHeaderPart().(type) {
+ case *objectV2.ShortHeader:
+ hdr = new(objectV2.Header)
+
+ var idV2 refsV2.ContainerID
+ h.cnr.WriteToV2(&idV2)
+
+ hdr.SetContainerID(&idV2)
+ hdr.SetVersion(v.GetVersion())
+ hdr.SetCreationEpoch(v.GetCreationEpoch())
+ hdr.SetOwnerID(v.GetOwnerID())
+ hdr.SetObjectType(v.GetObjectType())
+ hdr.SetPayloadLength(v.GetPayloadLength())
+ case *objectV2.HeaderWithSignature:
+ hdr = v.GetHeader()
+ }
+
+ oV2.SetHeader(hdr)
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ return nil
+}
+
+func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
+ if idObj != nil {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(*idObj)
+
+ obj, err := h.storage.Head(context.TODO(), addr)
+ if err == nil {
+ return headersFromObject(obj, cnr, idObj), true
+ }
+ }
+
+ return addressHeaders(cnr, idObj), false
+}
+
+func cidHeader(idCnr cid.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectContainerID,
+ v: idCnr.EncodeToString(),
+ }
+}
+
+func oidHeader(obj oid.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectID,
+ v: obj.EncodeToString(),
+ }
+}
+
+func ownerIDHeader(ownerID user.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectOwnerID,
+ v: ownerID.EncodeToString(),
+ }
+}
+
+func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
+ hh := make([]eaclSDK.Header, 0, 2)
+ hh = append(hh, cidHeader(cnr))
+
+ if oid != nil {
+ hh = append(hh, oidHeader(*oid))
+ }
+
+ return hh
+}
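The boolean returned by HeadersOfType is the validator's signal that object headers could not be fully resolved (the local Head call failed, so only address headers are available). A hypothetical in-package consumer, for illustration:

func dumpObjectHeaders(src eaclSDK.TypedHeaderSource) {
	hdrs, complete := src.HeadersOfType(eaclSDK.HeaderFromObject)
	if !complete {
		fmt.Println("object headers are incomplete; matching may fall back to the default action")
	}
	for _, h := range hdrs {
		fmt.Printf("%s=%s\n", h.Key(), h.Value())
	}
}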
diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go
new file mode 100644
index 000000000..92570a3c5
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/object.go
@@ -0,0 +1,92 @@
+package v2
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type sysObjHdr struct {
+ k, v string
+}
+
+func (s sysObjHdr) Key() string {
+ return s.k
+}
+
+func (s sysObjHdr) Value() string {
+ return s.v
+}
+
+func u64Value(v uint64) string {
+ return strconv.FormatUint(v, 10)
+}
+
+func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
+ var count int
+ for obj := obj; obj != nil; obj = obj.Parent() {
+ count += 9 + len(obj.Attributes())
+ }
+
+ res := make([]eaclSDK.Header, 0, count)
+ for ; obj != nil; obj = obj.Parent() {
+ res = append(res,
+ cidHeader(cnr),
+ // creation epoch
+ sysObjHdr{
+ k: acl.FilterObjectCreationEpoch,
+ v: u64Value(obj.CreationEpoch()),
+ },
+ // payload size
+ sysObjHdr{
+ k: acl.FilterObjectPayloadLength,
+ v: u64Value(obj.PayloadSize()),
+ },
+ // object version
+ sysObjHdr{
+ k: acl.FilterObjectVersion,
+ v: obj.Version().String(),
+ },
+ // object type
+ sysObjHdr{
+ k: acl.FilterObjectType,
+ v: obj.Type().String(),
+ },
+ )
+
+ if oid != nil {
+ res = append(res, oidHeader(*oid))
+ }
+
+ if idOwner := obj.OwnerID(); !idOwner.IsEmpty() {
+ res = append(res, ownerIDHeader(idOwner))
+ }
+
+ cs, ok := obj.PayloadChecksum()
+ if ok {
+ res = append(res, sysObjHdr{
+ k: acl.FilterObjectPayloadHash,
+ v: cs.String(),
+ })
+ }
+
+ cs, ok = obj.PayloadHomomorphicHash()
+ if ok {
+ res = append(res, sysObjHdr{
+ k: acl.FilterObjectHomomorphicHash,
+ v: cs.String(),
+ })
+ }
+
+ attrs := obj.Attributes()
+ for i := range attrs {
+ res = append(res, &attrs[i]) // only a pointer to an attribute implements the eaclSDK.Header interface
+ }
+ }
+
+ return res
+}
diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go
new file mode 100644
index 000000000..d91a21c75
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/opts.go
@@ -0,0 +1,11 @@
+package v2
+
+import (
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func WithOID(v *oid.ID) Option {
+ return func(c *cfg) {
+ c.obj = v
+ }
+}
diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go
new file mode 100644
index 000000000..ce380c117
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/xheader.go
@@ -0,0 +1,69 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+)
+
+type XHeaderSource interface {
+ GetXHeaders() []eaclSDK.Header
+}
+
+type requestXHeaderSource struct {
+ req Request
+}
+
+func NewRequestXHeaderSource(req Request) XHeaderSource {
+ return requestXHeaderSource{req: req}
+}
+
+type responseXHeaderSource struct {
+ resp Response
+
+ req Request
+}
+
+func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource {
+ return responseXHeaderSource{resp: resp, req: req}
+}
+
+func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
+ ln := 0
+
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ ln += len(meta.GetXHeaders())
+ }
+
+ res := make([]eaclSDK.Header, 0, ln)
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ x := meta.GetXHeaders()
+ for i := range x {
+ res = append(res, (xHeader)(x[i]))
+ }
+ }
+
+ return res
+}
+
+func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
+ ln := 0
+ xHdrs := make([][]session.XHeader, 0)
+
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ x := meta.GetXHeaders()
+
+ ln += len(x)
+
+ xHdrs = append(xHdrs, x)
+ }
+
+ res := make([]eaclSDK.Header, 0, ln)
+
+ for i := range xHdrs {
+ for j := range xHdrs[i] {
+ res = append(res, xHeader(xHdrs[i][j]))
+ }
+ }
+
+ return res
+}
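Both sources walk the meta-header chain from the outermost header down to its origin, so X-headers attached by intermediate nodes precede the client's own. A trivial in-package helper to make the call shape explicit:

func collectXHeaders(req Request) []eaclSDK.Header {
	// GetXHeaders flattens the whole origin chain into one slice.
	return NewRequestXHeaderSource(req).GetXHeaders()
}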
diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go
new file mode 100644
index 000000000..11b9e6e5f
--- /dev/null
+++ b/pkg/services/object/acl/v2/errors.go
@@ -0,0 +1,41 @@
+package v2
+
+import (
+ "fmt"
+
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+const invalidRequestMessage = "malformed request"
+
+func malformedRequestError(reason string) error {
+ return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
+}
+
+var (
+ errEmptyBody = malformedRequestError("empty body")
+ errEmptyVerificationHeader = malformedRequestError("empty verification header")
+ errEmptyBodySig = malformedRequestError("empty body signature")
+ errInvalidSessionSig = malformedRequestError("invalid session token signature")
+ errInvalidSessionOwner = malformedRequestError("invalid session token owner")
+ errInvalidVerb = malformedRequestError("session token verb is invalid")
+)
+
+const (
+ accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check"
+ accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v"
+)
+
+func basicACLErr(info RequestInfo) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation))
+
+ return errAccessDenied
+}
+
+func eACLErr(info RequestInfo, err error) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err))
+
+ return errAccessDenied
+}
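Callers can recover these statuses with errors.As, which is exactly what the tests below assert. A sketch, assuming apistatus.ObjectAccessDenied exposes a Reason accessor paired with the WriteReason calls above:

func accessDeniedReason(err error) (string, bool) {
	var denied *apistatus.ObjectAccessDenied
	if errors.As(err, &denied) {
		return denied.Reason(), true
	}
	return "", false
}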
diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go
new file mode 100644
index 000000000..2d2b7bc8d
--- /dev/null
+++ b/pkg/services/object/acl/v2/errors_test.go
@@ -0,0 +1,30 @@
+package v2
+
+import (
+ "errors"
+ "testing"
+
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBasicACLErr(t *testing.T) {
+ var reqInfo RequestInfo
+ err := basicACLErr(reqInfo)
+
+ var errAccessDenied *apistatus.ObjectAccessDenied
+
+ require.ErrorAs(t, err, &errAccessDenied,
+ "basicACLErr must be able to be casted to apistatus.ObjectAccessDenied")
+}
+
+func TestEACLErr(t *testing.T) {
+ var reqInfo RequestInfo
+ testErr := errors.New("test-eacl")
+ err := eACLErr(reqInfo, testErr)
+
+ var errAccessDenied *apistatus.ObjectAccessDenied
+
+ require.ErrorAs(t, err, &errAccessDenied,
+ "eACLErr must be able to be casted to apistatus.ObjectAccessDenied")
+}
diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go
new file mode 100644
index 000000000..15fcce884
--- /dev/null
+++ b/pkg/services/object/acl/v2/opts.go
@@ -0,0 +1,12 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+)
+
+// WithLogger returns option to set logger.
+func WithLogger(v *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = v
+ }
+}
diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go
new file mode 100644
index 000000000..e35cd2e11
--- /dev/null
+++ b/pkg/services/object/acl/v2/request.go
@@ -0,0 +1,159 @@
+package v2
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// RequestInfo groups parsed version-independent (from SDK library)
+// request information and raw API request.
+type RequestInfo struct {
+ basicACL acl.Basic
+ requestRole acl.Role
+ operation acl.Op // put, get, head, etc.
+ cnrOwner user.ID // container owner
+
+ // cnrNamespace defines the namespace the container belongs to.
+ cnrNamespace string
+
+ idCnr cid.ID
+
+ // optional for some requests,
+ // e.g. Put, Search
+ obj *oid.ID
+
+ senderKey []byte
+
+ bearer *bearer.Token // bearer token of request
+
+ srcRequest any
+}
+
+func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
+ r.basicACL = basicACL
+}
+
+func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
+ r.requestRole = requestRole
+}
+
+func (r *RequestInfo) SetSenderKey(senderKey []byte) {
+ r.senderKey = senderKey
+}
+
+// Request returns raw API request.
+func (r RequestInfo) Request() any {
+ return r.srcRequest
+}
+
+// ContainerOwner returns the owner of the container.
+func (r RequestInfo) ContainerOwner() user.ID {
+ return r.cnrOwner
+}
+
+func (r RequestInfo) ContainerNamespace() string {
+ return r.cnrNamespace
+}
+
+// ObjectID returns the object ID.
+func (r RequestInfo) ObjectID() *oid.ID {
+ return r.obj
+}
+
+// ContainerID returns the container ID.
+func (r RequestInfo) ContainerID() cid.ID {
+ return r.idCnr
+}
+
+// CleanBearer forces cleaning bearer token information.
+func (r *RequestInfo) CleanBearer() {
+ r.bearer = nil
+}
+
+// Bearer returns bearer token of the request.
+func (r RequestInfo) Bearer() *bearer.Token {
+ return r.bearer
+}
+
+// BasicACL returns basic ACL of the container.
+func (r RequestInfo) BasicACL() acl.Basic {
+ return r.basicACL
+}
+
+// SenderKey returns public key of the request's sender.
+func (r RequestInfo) SenderKey() []byte {
+ return r.senderKey
+}
+
+// Operation returns request's operation.
+func (r RequestInfo) Operation() acl.Op {
+ return r.operation
+}
+
+// RequestRole returns request sender's role.
+func (r RequestInfo) RequestRole() acl.Role {
+ return r.requestRole
+}
+
+// IsSoftAPECheck states if APE should perform soft checks.
+// Soft APE check allows a request if CheckAPE returns NoRuleFound for it,
+// otherwise it denies the request.
+func (r RequestInfo) IsSoftAPECheck() bool {
+ return r.BasicACL().Bits() != 0
+}
+
+// MetaWithToken groups session and bearer tokens,
+// verification header and raw API request.
+type MetaWithToken struct {
+ vheader *sessionV2.RequestVerificationHeader
+ token *sessionSDK.Object
+ bearer *bearer.Token
+ src any
+}
+
+// RequestOwner returns ownerID and its public key
+// according to internal meta information.
+func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+ if r.vheader == nil {
+ return nil, nil, errEmptyVerificationHeader
+ }
+
+ if r.bearer != nil && r.bearer.Impersonate() {
+ return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes())
+ }
+
+ // if a session token is present, use it as the source of truth
+ if r.token != nil {
+ // verify signature of session token
+ return ownerFromToken(r.token)
+ }
+
+ // otherwise get original body signature
+ bodySignature := originalBodySignature(r.vheader)
+ if bodySignature == nil {
+ return nil, nil, errEmptyBodySig
+ }
+
+ return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
+ key, err := unmarshalPublicKey(rawKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var idSender user.ID
+ user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
+
+ return &idSender, key, nil
+}
diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/acl/v2/request_test.go
similarity index 83%
rename from pkg/services/object/ape/metadata_test.go
rename to pkg/services/object/acl/v2/request_test.go
index fd919008f..618af3469 100644
--- a/pkg/services/object/ape/metadata_test.go
+++ b/pkg/services/object/acl/v2/request_test.go
@@ -1,4 +1,4 @@
-package ape
+package v2
import (
"testing"
@@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) {
vh.SetBodySignature(&userSignature)
t.Run("empty verification header", func(t *testing.T) {
- req := Metadata{}
+ req := MetaWithToken{}
checkOwner(t, req, nil, errEmptyVerificationHeader)
})
t.Run("empty verification header signature", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: new(sessionV2.RequestVerificationHeader),
+ req := MetaWithToken{
+ vheader: new(sessionV2.RequestVerificationHeader),
}
checkOwner(t, req, nil, errEmptyBodySig)
})
t.Run("no tokens", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
+ req := MetaWithToken{
+ vheader: vh,
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer without impersonate, no session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, false),
+ req := MetaWithToken{
+ vheader: vh,
+ bearer: newBearer(t, containerOwner, userID, false),
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer with impersonate, no session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, true),
+ req := MetaWithToken{
+ vheader: vh,
+ bearer: newBearer(t, containerOwner, userID, true),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, true),
- SessionToken: newSession(t, pk),
+ req := MetaWithToken{
+ vheader: vh,
+ bearer: newBearer(t, containerOwner, userID, true),
+ token: newSession(t, pk),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
t.Run("with session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- SessionToken: newSession(t, containerOwner),
+ req := MetaWithToken{
+ vheader: vh,
+ token: newSession(t, containerOwner),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) {
var tok sessionSDK.Object
require.NoError(t, tok.ReadFromV2(tokV2))
- req := Metadata{
- VerificationHeader: vh,
- SessionToken: &tok,
+ req := MetaWithToken{
+ vheader: vh,
+ token: &tok,
}
checkOwner(t, req, nil, errInvalidSessionOwner)
})
@@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool
return &tok
}
-func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
+func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) {
_, actual, err := req.RequestOwner()
if expectedErr != nil {
require.ErrorIs(t, err, expectedErr)
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
new file mode 100644
index 000000000..e02a3be36
--- /dev/null
+++ b/pkg/services/object/acl/v2/service.go
@@ -0,0 +1,919 @@
+package v2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "go.uber.org/zap"
+)
+
+// Service checks basic ACL rules.
+type Service struct {
+ *cfg
+
+ c objectCore.SenderClassifier
+}
+
+type putStreamBasicChecker struct {
+ source *Service
+ next object.PutObjectStream
+}
+
+type patchStreamBasicChecker struct {
+ source *Service
+ next object.PatchObjectStream
+ nonFirstSend bool
+}
+
+type getStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.GetObjectStream
+
+ info RequestInfo
+}
+
+type rangeStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.GetObjectRangeStream
+
+ info RequestInfo
+}
+
+type searchStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.SearchStream
+
+ info RequestInfo
+}
+
+// Option represents Service constructor option.
+type Option func(*cfg)
+
+type cfg struct {
+ log *logger.Logger
+
+ containers container.Source
+
+ checker ACLChecker
+
+ irFetcher InnerRingFetcher
+
+ nm netmap.Source
+
+ next object.ServiceServer
+}
+
+// New is a constructor for object ACL checking service.
+func New(next object.ServiceServer,
+ nm netmap.Source,
+ irf InnerRingFetcher,
+ acl ACLChecker,
+ cs container.Source,
+ opts ...Option,
+) Service {
+ cfg := &cfg{
+ log: &logger.Logger{Logger: zap.L()},
+ next: next,
+ nm: nm,
+ irFetcher: irf,
+ checker: acl,
+ containers: cs,
+ }
+
+ for i := range opts {
+ opts[i](cfg)
+ }
+
+ return Service{
+ cfg: cfg,
+ c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
+ }
+}
+
+// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context.
+// This allows the next handler invocation to retrieve already calculated immutable request-specific values.
+type wrappedGetObjectStream struct {
+ object.GetObjectStream
+
+ requestInfo RequestInfo
+}
+
+func (w *wrappedGetObjectStream) Context() context.Context {
+ return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{
+ Namespace: w.requestInfo.ContainerNamespace(),
+ ContainerOwner: w.requestInfo.ContainerOwner(),
+ SenderKey: w.requestInfo.SenderKey(),
+ Role: w.requestInfo.RequestRole(),
+ SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
+ BearerToken: w.requestInfo.Bearer(),
+ })
+}
+
+func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream {
+ return &wrappedGetObjectStream{
+ GetObjectStream: getObjectStream,
+ requestInfo: reqInfo,
+ }
+}
+
+// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context.
+// This allows the next handler invocation to retrieve already calculated immutable request-specific values.
+type wrappedRangeStream struct {
+ object.GetObjectRangeStream
+
+ requestInfo RequestInfo
+}
+
+func (w *wrappedRangeStream) Context() context.Context {
+ return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{
+ Namespace: w.requestInfo.ContainerNamespace(),
+ ContainerOwner: w.requestInfo.ContainerOwner(),
+ SenderKey: w.requestInfo.SenderKey(),
+ Role: w.requestInfo.RequestRole(),
+ SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
+ BearerToken: w.requestInfo.Bearer(),
+ })
+}
+
+func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream {
+ return &wrappedRangeStream{
+ GetObjectRangeStream: rangeStream,
+ requestInfo: reqInfo,
+ }
+}
+
+// wrappedSearchStream propagates RequestContext into SearchStream's context.
+// This allows the next handler invocation to retrieve already calculated immutable request-specific values.
+type wrappedSearchStream struct {
+ object.SearchStream
+
+ requestInfo RequestInfo
+}
+
+func (w *wrappedSearchStream) Context() context.Context {
+ return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{
+ Namespace: w.requestInfo.ContainerNamespace(),
+ ContainerOwner: w.requestInfo.ContainerOwner(),
+ SenderKey: w.requestInfo.SenderKey(),
+ Role: w.requestInfo.RequestRole(),
+ SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
+ BearerToken: w.requestInfo.Bearer(),
+ })
+}
+
+func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream {
+ return &wrappedSearchStream{
+ SearchStream: searchStream,
+ requestInfo: reqInfo,
+ }
+}
+
+// Get implements ServiceServer interface, makes ACL checks and calls
+// next Get method in the ServiceServer pipeline.
+func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.Get(request, &getStreamBasicChecker{
+ GetObjectStream: newWrappedGetObjectStreamStream(stream, reqInfo),
+ info: reqInfo,
+ checker: b.checker,
+ })
+}
+
+func (b Service) Put() (object.PutObjectStream, error) {
+ streamer, err := b.next.Put()
+
+ return putStreamBasicChecker{
+ source: &b,
+ next: streamer,
+ }, err
+}
+
+func (b Service) Patch() (object.PatchObjectStream, error) {
+ streamer, err := b.next.Patch()
+
+ return &patchStreamBasicChecker{
+ source: &b,
+ next: streamer,
+ }, err
+}
+
+func (b Service) Head(
+ ctx context.Context,
+ request *objectV2.HeadRequest,
+) (*objectV2.HeadResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ resp, err := b.next.Head(requestContext(ctx, reqInfo), request)
+ if err == nil {
+ if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
+ err = eACLErr(reqInfo, err)
+ }
+ }
+
+ return resp, err
+}
+
+func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
+ id, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, id, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch)
+ if err != nil {
+ return err
+ }
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.Search(request, &searchStreamBasicChecker{
+ checker: b.checker,
+ SearchStream: newWrappedSearchStream(stream, reqInfo),
+ info: reqInfo,
+ })
+}
+
+func (b Service) Delete(
+ ctx context.Context,
+ request *objectV2.DeleteRequest,
+) (*objectV2.DeleteResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.Delete(requestContext(ctx, reqInfo), request)
+}
+
+func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.GetRange(request, &rangeStreamBasicChecker{
+ checker: b.checker,
+ GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo),
+ info: reqInfo,
+ })
+}
+
+func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context {
+ return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{
+ Namespace: reqInfo.ContainerNamespace(),
+ ContainerOwner: reqInfo.ContainerOwner(),
+ SenderKey: reqInfo.SenderKey(),
+ Role: reqInfo.RequestRole(),
+ SoftAPECheck: reqInfo.IsSoftAPECheck(),
+ BearerToken: reqInfo.Bearer(),
+ })
+}
+
+func (b Service) GetRangeHash(
+ ctx context.Context,
+ request *objectV2.GetRangeHashRequest,
+) (*objectV2.GetRangeHashResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.GetRangeHash(requestContext(ctx, reqInfo), request)
+}
+
+func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID()
+ if idV2 == nil {
+ return nil, errors.New("missing object owner")
+ }
+
+ var idOwner user.ID
+
+ err = idOwner.ReadFromV2(*idV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid object owner: %w", err)
+ }
+
+ obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID())
+ if err != nil {
+ return nil, err
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return nil, err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) || !b.checker.StickyBitCheck(reqInfo, idOwner) {
+ return nil, basicACLErr(reqInfo)
+ }
+ if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.PutSingle(requestContext(ctx, reqInfo), request)
+}
+
+func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
+ body := request.GetBody()
+ if body == nil {
+ return errEmptyBody
+ }
+
+ part := body.GetObjectPart()
+ if part, ok := part.(*objectV2.PutObjectPartInit); ok {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ idV2 := part.GetHeader().GetOwnerID()
+ if idV2 == nil {
+ return errors.New("missing object owner")
+ }
+
+ var idOwner user.ID
+
+ err = idOwner.ReadFromV2(*idV2)
+ if err != nil {
+ return fmt.Errorf("invalid object owner: %w", err)
+ }
+
+ objV2 := part.GetObjectID()
+ var obj *oid.ID
+
+ if objV2 != nil {
+ obj = new(oid.ID)
+
+ err = obj.ReadFromV2(*objV2)
+ if err != nil {
+ return err
+ }
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) {
+ return basicACLErr(reqInfo)
+ }
+ }
+
+ ctx = requestContext(ctx, reqInfo)
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+ var sTok *sessionSDK.Object
+
+ if tokV2 != nil {
+ sTok = new(sessionSDK.Object)
+
+ err := sTok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+ // if the session relates to the object's removal, we don't check the
+ // relation of the tombstone to the session here since the user can't
+ // predict the tombstone's ID.
+ err = assertSessionRelation(*sTok, cnr, nil)
+ } else {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return sTok, nil
+}
+
+func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
+ if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+ }
+
+ return g.GetObjectStream.Send(resp)
+}
+
+func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+
+ return g.GetObjectRangeStream.Send(resp)
+}
+
+func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+
+ return g.SearchStream.Send(resp)
+}
+
+func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+ body := request.GetBody()
+ if body == nil {
+ return errEmptyBody
+ }
+
+ if !p.nonFirstSend {
+ p.nonFirstSend = true
+
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ objV2 := request.GetBody().GetAddress().GetObjectID()
+ if objV2 == nil {
+ return errors.New("missing oid")
+ }
+ obj := new(oid.ID)
+ err = obj.ReadFromV2(*objV2)
+ if err != nil {
+ return err
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(req, cnr)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ ctx = requestContext(ctx, reqInfo)
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
+ cnr, err := b.containers.Get(idCnr) // fetch actual container
+ if err != nil {
+ return info, err
+ }
+
+ if req.token != nil {
+ currentEpoch, err := b.nm.Epoch()
+ if err != nil {
+ return info, errors.New("can't fetch current epoch")
+ }
+ if req.token.ExpiredAt(currentEpoch) {
+ return info, new(apistatus.SessionTokenExpired)
+ }
+ if req.token.InvalidAt(currentEpoch) {
+			return info, fmt.Errorf("%s: token is invalid at epoch %d",
+				invalidRequestMessage, currentEpoch)
+ }
+
+ if !assertVerb(*req.token, op) {
+ return info, errInvalidVerb
+ }
+ }
+
+ // find request role and key
+ ownerID, ownerKey, err := req.RequestOwner()
+ if err != nil {
+ return info, err
+ }
+ res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+ if err != nil {
+ return info, err
+ }
+
+ info.basicACL = cnr.Value.BasicACL()
+ info.requestRole = res.Role
+ info.operation = op
+ info.cnrOwner = cnr.Value.Owner()
+ info.idCnr = idCnr
+
+ cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+ if hasNamespace {
+ info.cnrNamespace = cnrNamespace
+ }
+
+	// the key is assumed to be valid at this point,
+	// otherwise the request would not have passed validation
+ info.senderKey = res.Key
+
+ // add bearer token if it is present in request
+ info.bearer = req.bearer
+
+ info.srcRequest = req.src
+
+ return info, nil
+}
+
+// findRequestInfoWithoutACLOperationAssert is findRequestInfo without the session token verb assertion.
+func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
+ cnr, err := b.containers.Get(idCnr) // fetch actual container
+ if err != nil {
+ return info, err
+ }
+
+ if req.token != nil {
+ currentEpoch, err := b.nm.Epoch()
+ if err != nil {
+ return info, errors.New("can't fetch current epoch")
+ }
+ if req.token.ExpiredAt(currentEpoch) {
+ return info, new(apistatus.SessionTokenExpired)
+ }
+ if req.token.InvalidAt(currentEpoch) {
+			return info, fmt.Errorf("%s: token is invalid at epoch %d",
+				invalidRequestMessage, currentEpoch)
+ }
+ }
+
+ // find request role and key
+ ownerID, ownerKey, err := req.RequestOwner()
+ if err != nil {
+ return info, err
+ }
+ res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+ if err != nil {
+ return info, err
+ }
+
+ info.basicACL = cnr.Value.BasicACL()
+ info.requestRole = res.Role
+ info.cnrOwner = cnr.Value.Owner()
+ info.idCnr = idCnr
+
+ cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+ if hasNamespace {
+ info.cnrNamespace = cnrNamespace
+ }
+
+	// the key is assumed to be valid at this point,
+	// otherwise the request would not have passed validation
+ info.senderKey = res.Key
+
+ // add bearer token if it is present in request
+ info.bearer = req.bearer
+
+ info.srcRequest = req.src
+
+ return info, nil
+}
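Both findRequestInfo variants above repeat the same session-token lifetime gate. A minimal sketch of how it could be factored out (validateTokenLifetime is a hypothetical helper; sessionSDK, apistatus, and invalidRequestMessage are the names already used in this file):

func validateTokenLifetime(tok *sessionSDK.Object, currentEpoch uint64) error {
	// reject tokens whose lifetime does not cover the current epoch
	if tok.ExpiredAt(currentEpoch) {
		return new(apistatus.SessionTokenExpired)
	}
	if tok.InvalidAt(currentEpoch) {
		return fmt.Errorf("%s: token is invalid at epoch %d",
			invalidRequestMessage, currentEpoch)
	}
	return nil
}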
diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go
new file mode 100644
index 000000000..061cd26b6
--- /dev/null
+++ b/pkg/services/object/acl/v2/types.go
@@ -0,0 +1,28 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+// ACLChecker is an interface that must provide
+// ACL-related checks.
+type ACLChecker interface {
+	// CheckBasicACL must return true only if the request
+	// passes basic ACL validation.
+	CheckBasicACL(RequestInfo) bool
+	// CheckEACL must return a non-nil error if the request
+	// doesn't pass extended ACL validation.
+	CheckEACL(any, RequestInfo) error
+	// StickyBitCheck must return true only if the sticky bit
+	// is disabled, or it is enabled and the request contains
+	// the correct owner field.
+	StickyBitCheck(RequestInfo, user.ID) bool
+}
+
+// InnerRingFetcher is an interface that must provide
+// Inner Ring information.
+type InnerRingFetcher interface {
+	// InnerRingKeys must return the list of public keys
+	// of the current Inner Ring nodes.
+	InnerRingKeys() ([][]byte, error)
+}
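A minimal sketch of a type satisfying ACLChecker, e.g. as a permissive test double (allowAllChecker is hypothetical; RequestInfo and user.ID are the types referenced above):

type allowAllChecker struct{}

func (allowAllChecker) CheckBasicACL(RequestInfo) bool           { return true }
func (allowAllChecker) CheckEACL(any, RequestInfo) error         { return nil }
func (allowAllChecker) StickyBitCheck(RequestInfo, user.ID) bool { return true }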
diff --git a/pkg/services/object/ape/util.go b/pkg/services/object/acl/v2/util.go
similarity index 58%
rename from pkg/services/object/ape/util.go
rename to pkg/services/object/acl/v2/util.go
index 5cd2caa50..e02f70771 100644
--- a/pkg/services/object/ape/util.go
+++ b/pkg/services/object/acl/v2/util.go
@@ -1,4 +1,4 @@
-package ape
+package v2
import (
"crypto/ecdsa"
@@ -6,34 +6,57 @@ import (
"errors"
"fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
- if cidV2 != nil {
- if err = cnrID.ReadFromV2(*cidV2); err != nil {
- return
+var errMissingContainerID = errors.New("missing container ID")
+
+func getContainerIDFromRequest(req any) (cid.ID, error) {
+ var idV2 *refsV2.ContainerID
+ var id cid.ID
+
+ switch v := req.(type) {
+ case *objectV2.GetRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.PutRequest:
+ part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
+ if !ok {
+			return cid.ID{}, errors.New("can't get container ID from chunk part")
}
- } else {
- err = errMissingContainerID
- return
+
+ idV2 = part.GetHeader().GetContainerID()
+ case *objectV2.HeadRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.SearchRequest:
+ idV2 = v.GetBody().GetContainerID()
+ case *objectV2.DeleteRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.GetRangeRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.GetRangeHashRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.PutSingleRequest:
+ idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
+ case *objectV2.PatchRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ default:
+ return cid.ID{}, errors.New("unknown request type")
}
- if objV2 != nil {
- objID = new(oid.ID)
- if err = objID.ReadFromV2(*objV2); err != nil {
- return
- }
+ if idV2 == nil {
+ return cid.ID{}, errMissingContainerID
}
- return
+
+ return id, id.ReadFromV2(*idV2)
}
// originalBearerToken goes down to original request meta header and fetches
@@ -52,6 +75,50 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
return &tok, tok.ReadFromV2(*tokV2)
}
+// originalSessionToken walks down to the original request meta header
+// and fetches the session token from there.
+func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
+ for header.GetOrigin() != nil {
+ header = header.GetOrigin()
+ }
+
+ tokV2 := header.GetSessionToken()
+ if tokV2 == nil {
+ return nil, nil
+ }
+
+ var tok sessionSDK.Object
+
+ err := tok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ return &tok, nil
+}
+
+// getObjectIDFromRequestBody decodes an oid.ID from the common interface
+// implemented by request bodies that carry an object address. It returns
+// an error if the object ID is missing from the request.
+func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
+ idV2 := body.GetAddress().GetObjectID()
+ return getObjectIDFromRefObjectID(idV2)
+}
+
+func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) {
+ if idV2 == nil {
+ return nil, errors.New("missing object ID")
+ }
+
+ var id oid.ID
+
+ err := id.ReadFromV2(*idV2)
+ if err != nil {
+ return nil, err
+ }
+
+ return &id, nil
+}
+
func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
// 1. First check signature of session token.
if !token.VerifySignature() {
@@ -105,16 +172,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-// assertVerb checks that token verb corresponds to the method.
-func assertVerb(tok sessionSDK.Object, method string) bool {
- switch method {
- case nativeschema.MethodPutObject:
+// assertVerb checks that token verb corresponds to op.
+func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
+ switch op {
+ case acl.OpObjectPut:
return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
- case nativeschema.MethodDeleteObject:
+ case acl.OpObjectDelete:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
- case nativeschema.MethodGetObject:
+ case acl.OpObjectGet:
return tok.AssertVerb(sessionSDK.VerbObjectGet)
- case nativeschema.MethodHeadObject:
+ case acl.OpObjectHead:
return tok.AssertVerb(
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
@@ -123,15 +190,14 @@ func assertVerb(tok sessionSDK.Object, method string) bool {
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectPatch,
)
- case nativeschema.MethodSearchObject:
+ case acl.OpObjectSearch:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
- case nativeschema.MethodRangeObject:
+ case acl.OpObjectRange:
return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
- case nativeschema.MethodHashObject:
+ case acl.OpObjectHash:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
- case nativeschema.MethodPatchObject:
- return tok.AssertVerb(sessionSDK.VerbObjectPatch)
}
+
return false
}
@@ -155,15 +221,3 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error
return nil
}
-
-func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
- key, err := unmarshalPublicKey(rawKey)
- if err != nil {
- return nil, nil, fmt.Errorf("invalid signature key: %w", err)
- }
-
- var idSender user.ID
- user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
-
- return &idSender, key, nil
-}
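A condensed caller-side view of getContainerIDFromRequest, assuming req is any of the supported *objectV2 request types; the two error paths are the ones defined above:

id, err := getContainerIDFromRequest(req)
switch {
case errors.Is(err, errMissingContainerID):
	// well-formed request that simply carries no container ID
case err != nil:
	// unknown request type, chunk part of a Put stream, or malformed ID bytes
default:
	_ = id // decoded cid.ID, ready for the ACL checks
}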
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
new file mode 100644
index 000000000..4b19cecfe
--- /dev/null
+++ b/pkg/services/object/acl/v2/util_test.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
+ aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOriginalTokens(t *testing.T) {
+ sToken := sessiontest.ObjectSigned()
+ bToken := bearertest.Token()
+
+ pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, bToken.Sign(*pk))
+
+ var bTokenV2 acl.BearerToken
+ bToken.WriteToV2(&bTokenV2)
+	// This round-trip is needed because the SDK uses a custom format for
+	// reserved filters, so cid.ID is not converted to a string immediately.
+ require.NoError(t, bToken.ReadFromV2(bTokenV2))
+
+ var sTokenV2 session.Token
+ sToken.WriteToV2(&sTokenV2)
+
+ for i := range 10 {
+ metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
+ res, err := originalSessionToken(metaHeaders)
+ require.NoError(t, err)
+ require.Equal(t, sToken, res, i)
+
+ bTok, err := originalBearerToken(metaHeaders)
+ require.NoError(t, err)
+ require.Equal(t, &bToken, bTok, i)
+ }
+}
+
+func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
+ metaHeader := new(session.RequestMetaHeader)
+ metaHeader.SetBearerToken(b)
+ metaHeader.SetSessionToken(s)
+
+ for range depth {
+ link := metaHeader
+ metaHeader = new(session.RequestMetaHeader)
+ metaHeader.SetOrigin(link)
+ }
+
+ return metaHeader
+}
+
+func TestIsVerbCompatible(t *testing.T) {
+ // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
+ table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
+ aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
+ aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
+ aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet},
+ aclsdk.OpObjectHead: {
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ },
+ aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
+ aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash},
+ aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
+ }
+
+ verbs := []sessionSDK.ObjectVerb{
+ sessionSDK.VerbObjectPut,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectSearch,
+ }
+
+ var tok sessionSDK.Object
+
+ for op, list := range table {
+ for _, verb := range verbs {
+ var contains bool
+ for _, v := range list {
+ if v == verb {
+ contains = true
+ break
+ }
+ }
+
+ tok.ForVerb(verb)
+
+ require.Equal(t, contains, assertVerb(tok, op),
+ "%v in token, %s executing", verb, op)
+ }
+ }
+}
+
+func TestAssertSessionRelation(t *testing.T) {
+ var tok sessionSDK.Object
+ cnr := cidtest.ID()
+ cnrOther := cidtest.ID()
+ obj := oidtest.ID()
+ objOther := oidtest.ID()
+
+	// make sure the IDs differ, otherwise the test won't work correctly
+ require.False(t, cnrOther.Equals(cnr))
+ require.False(t, objOther.Equals(obj))
+
+ // bind session to the container (required)
+ tok.BindContainer(cnr)
+
+ // test container-global session
+ require.NoError(t, assertSessionRelation(tok, cnr, nil))
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnrOther, nil))
+ require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
+
+ // limit the session to the particular object
+ tok.LimitByObjects(obj)
+
+ // test fixed object session (here obj arg must be non-nil everywhere)
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnr, &objOther))
+}
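As a quick illustration of the compatibility table exercised above, a token limited to a single verb passes assertVerb only for the operations that list it (hypothetical snippet using the same imports as the test):

var tok sessionSDK.Object
tok.ForVerb(sessionSDK.VerbObjectGet)
fmt.Println(assertVerb(tok, aclsdk.OpObjectGet)) // true: OpObjectGet lists VerbObjectGet
fmt.Println(assertVerb(tok, aclsdk.OpObjectPut)) // false: put requires a put, delete, or patch verb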
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index bb6067a37..abcd2f4bb 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -64,8 +64,8 @@ type Prm struct {
// An encoded container's owner user ID.
ContainerOwner user.ID
- // Attributes defined for the container.
- ContainerAttributes map[string]string
+ // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
+ SoftAPECheck bool
// The request's bearer token. It is used in order to check APE overrides with the token.
BearerToken *bearer.Token
@@ -79,10 +79,9 @@ var errMissingOID = errors.New("object ID is not set")
// CheckAPE prepares an APE-request and checks if it is permitted by policies.
func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
// APE check is ignored for some inter-node requests.
- switch prm.Role {
- case nativeschema.PropertyValueContainerRoleContainer:
+ if prm.Role == nativeschema.PropertyValueContainerRoleContainer {
return nil
- case nativeschema.PropertyValueContainerRoleIR:
+ } else if prm.Role == nativeschema.PropertyValueContainerRoleIR {
switch prm.Method {
case nativeschema.MethodGetObject,
nativeschema.MethodHeadObject,
@@ -103,12 +102,13 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
return err
}
- return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{
+ return c.checkerCore.CheckAPE(checkercore.CheckPrm{
Request: r,
PublicKey: pub,
Namespace: prm.Namespace,
Container: prm.Container,
ContainerOwner: prm.ContainerOwner,
BearerToken: prm.BearerToken,
+ SoftAPECheck: prm.SoftAPECheck,
})
}
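The new SoftAPECheck flag changes how a missing rule is interpreted. A sketch of the intended decision (the status names follow the policy-engine chain package; the helper is illustrative, not code from this patch):

func allowed(status chain.Status, soft bool) bool {
	switch status {
	case chain.Allow:
		return true
	case chain.NoRuleFound:
		return soft // soft check: absence of a matching rule means allow
	default:
		return false
	}
}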
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
index 97eb2b2d7..e03b5750c 100644
--- a/pkg/services/object/ape/checker_test.go
+++ b/pkg/services/object/ape/checker_test.go
@@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
return pk.GetScriptHash()
}
-func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
+func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) {
v, ok := f.subjects[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160
return v, nil
}
-func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) {
v, ok := f.subjectsExtended[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -619,21 +619,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+ return s.GetNetMapByEpoch(s.currentEpoch - diff)
}
-func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
+func (s *netmapStub) Epoch() (uint64, error) {
return s.currentEpoch, nil
}
@@ -641,14 +641,14 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container
}
-func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
-func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
return nil, nil
}
diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
index 82e660a7f..1b2024ed5 100644
--- a/pkg/services/object/ape/errors.go
+++ b/pkg/services/object/ape/errors.go
@@ -1,34 +1,10 @@
package ape
import (
- "errors"
-
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
-var (
- errMissingContainerID = malformedRequestError("missing container ID")
- errEmptyVerificationHeader = malformedRequestError("empty verification header")
- errEmptyBodySig = malformedRequestError("empty at body signature")
- errInvalidSessionSig = malformedRequestError("invalid session token signature")
- errInvalidSessionOwner = malformedRequestError("invalid session token owner")
- errInvalidVerb = malformedRequestError("session token verb is invalid")
-)
-
-func malformedRequestError(reason string) error {
- invalidArgErr := &apistatus.InvalidArgument{}
- invalidArgErr.SetMessage(reason)
- return invalidArgErr
-}
-
func toStatusErr(err error) error {
- var chRouterErr *checkercore.ChainRouterError
- if !errors.As(err, &chRouterErr) {
- errServerInternal := &apistatus.ServerInternal{}
- apistatus.WriteInternalServerErr(errServerInternal, err)
- return errServerInternal
- }
errAccessDenied := &apistatus.ObjectAccessDenied{}
errAccessDenied.WriteReason("ape denied request: " + err.Error())
return errAccessDenied
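With the checkercore routing removed, toStatusErr now maps every APE failure to access-denied; the caller pattern used throughout service.go:

if err := c.apeChecker.CheckAPE(ctx, prm); err != nil {
	return nil, toStatusErr(err) // always an *apistatus.ObjectAccessDenied with the reason attached
}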
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
deleted file mode 100644
index 102985aa6..000000000
--- a/pkg/services/object/ape/metadata.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package ape
-
-import (
- "context"
- "encoding/hex"
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-type Metadata struct {
- Container cid.ID
- Object *oid.ID
- MetaHeader *session.RequestMetaHeader
- VerificationHeader *session.RequestVerificationHeader
- SessionToken *sessionSDK.Object
- BearerToken *bearer.Token
-}
-
-func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
- if m.VerificationHeader == nil {
- return nil, nil, errEmptyVerificationHeader
- }
-
- if m.BearerToken != nil && m.BearerToken.Impersonate() {
- return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
- }
-
- // if session token is presented, use it as truth source
- if m.SessionToken != nil {
- // verify signature of session token
- return ownerFromToken(m.SessionToken)
- }
-
- // otherwise get original body signature
- bodySignature := originalBodySignature(m.VerificationHeader)
- if bodySignature == nil {
- return nil, nil, errEmptyBodySig
- }
-
- return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
-}
-
-// RequestInfo contains request information extracted by request metadata.
-type RequestInfo struct {
- // Role defines under which role this request is executed.
- // It must be represented only as a constant represented in native schema.
- Role string
-
- ContainerOwner user.ID
-
- ContainerAttributes map[string]string
-
- // Namespace defines to which namespace a container is belonged.
- Namespace string
-
- // HEX-encoded sender key.
- SenderKey string
-}
-
-type RequestInfoExtractor interface {
- GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
-}
-
-type extractor struct {
- containers container.Source
-
- nm netmap.Source
-
- classifier objectCore.SenderClassifier
-}
-
-func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
- return &extractor{
- containers: containers,
- nm: nm,
- classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
- }
-}
-
-func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
- currentEpoch, err := e.nm.Epoch(ctx)
- if err != nil {
- return errors.New("can't fetch current epoch")
- }
- if sessionToken.ExpiredAt(currentEpoch) {
- return new(apistatus.SessionTokenExpired)
- }
- if sessionToken.InvalidAt(currentEpoch) {
- return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch)
- }
- if !assertVerb(*sessionToken, method) {
- return errInvalidVerb
- }
- return nil
-}
-
-func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
- cnr, err := e.containers.Get(ctx, m.Container)
- if err != nil {
- return ri, err
- }
-
- if m.SessionToken != nil {
- if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
- return ri, err
- }
- }
-
- ownerID, ownerKey, err := m.RequestOwner()
- if err != nil {
- return ri, err
- }
- res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
- if err != nil {
- return ri, err
- }
-
- ri.Role = nativeSchemaRole(res.Role)
- ri.ContainerOwner = cnr.Value.Owner()
-
- ri.ContainerAttributes = map[string]string{}
- for key, val := range cnr.Value.Attributes() {
- ri.ContainerAttributes[key] = val
- }
-
- cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
- if hasNamespace {
- ri.Namespace = cnrNamespace
- }
-
- // it is assumed that at the moment the key will be valid,
- // otherwise the request would not pass validation
- ri.SenderKey = hex.EncodeToString(res.Key)
-
- return ri, nil
-}
-
-func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
- var sTok *sessionSDK.Object
-
- if tokV2 != nil {
- sTok = new(sessionSDK.Object)
-
- err := sTok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
- // if session relates to object's removal, we don't check
- // relation of the tombstone to the session here since user
- // can't predict tomb's ID.
- err = assertSessionRelation(*sTok, cnr, nil)
- } else {
- err = assertSessionRelation(*sTok, cnr, obj)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- return sTok, nil
-}
diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go
index 39dd7f476..cb9bbf1b8 100644
--- a/pkg/services/object/ape/request.go
+++ b/pkg/services/object/ape/request.go
@@ -57,16 +57,11 @@ func resourceName(cid cid.ID, oid *oid.ID, namespace string) string {
}
// objectProperties collects object properties from address parameters and a header if it is passed.
-func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string {
+func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string {
objectProps := map[string]string{
nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
}
- for attrName, attrValue := range cnrAttrs {
- prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName)
- objectProps[prop] = attrValue
- }
-
objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString()
if oid != nil {
@@ -145,7 +140,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
reqProps[xheadKey] = xhead.GetValue()
}
- reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm)
+ reqProps, err = c.fillWithUserClaimTags(reqProps, prm)
if err != nil {
return defaultRequest, err
}
@@ -160,7 +155,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
prm.Method,
aperequest.NewResource(
resourceName(prm.Container, prm.Object, prm.Namespace),
- objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header),
+ objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
),
reqProps,
), nil
@@ -182,7 +177,7 @@ func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, heade
return nil, fmt.Errorf("EC parent object ID format error: %w", err)
}
// only container node have access to collect parent object
- contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container)
+ contNode, err := c.currentNodeIsContainerNode(prm.Container)
if err != nil {
return nil, fmt.Errorf("check container node status: %w", err)
}
@@ -205,13 +200,13 @@ func isLogicalError(err error) bool {
return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound)
}
-func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) {
- cnr, err := c.cnrSource.Get(ctx, cnrID)
+func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
+ cnr, err := c.cnrSource.Get(cnrID)
if err != nil {
return false, err
}
- nm, err := netmap.GetLatestNetworkMap(ctx, c.nm)
+ nm, err := netmap.GetLatestNetworkMap(c.nm)
if err != nil {
return false, err
}
@@ -225,7 +220,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.
return true, nil
}
- nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
+ nm, err = netmap.GetPreviousNetworkMap(c.nm)
if err != nil {
return false, err
}
@@ -234,7 +229,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) {
+func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
@@ -242,7 +237,7 @@ func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[st
if err != nil {
return nil, err
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
index fcf7c4c40..787785b60 100644
--- a/pkg/services/object/ape/request_test.go
+++ b/pkg/services/object/ape/request_test.go
@@ -7,7 +7,6 @@ import (
"testing"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -20,20 +19,11 @@ import (
)
const (
- testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y"
+ testOwnerID = "FPPtmAi9TCX329"
incomingIP = "192.92.33.1"
-
- testSysAttrName = "unittest"
-
- testSysAttrZone = "eggplant"
)
-var containerAttrs = map[string]string{
- cnrV2.SysAttributeName: testSysAttrName,
- cnrV2.SysAttributeZone: testSysAttrZone,
-}
-
func ctxWithPeerInfo() context.Context {
return peer.NewContext(context.Background(), &peer.Peer{
Addr: &net.TCPAddr{
@@ -115,7 +105,7 @@ func TestObjectProperties(t *testing.T) {
var testCnrOwner user.ID
require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
- props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader())
+ props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader())
require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID])
require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID])
@@ -134,8 +124,6 @@ func TestObjectProperties(t *testing.T) {
require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType])
require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash])
require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash])
- require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)])
- require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)])
for _, attr := range test.header.attributes {
require.Equal(t, attr.val, props[attr.key])
@@ -257,10 +245,6 @@ func TestNewAPERequest(t *testing.T) {
Role: role,
SenderKey: senderKey,
ContainerOwner: testCnrOwner,
- ContainerAttributes: map[string]string{
- cnrV2.SysAttributeZone: testSysAttrZone,
- cnrV2.SysAttributeName: testSysAttrName,
- },
}
headerSource := newHeaderProviderMock()
@@ -293,7 +277,7 @@ func TestNewAPERequest(t *testing.T) {
method,
aperequest.NewResource(
resourceName(cnr, obj, prm.Namespace),
- objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header {
+ objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
if headerObjSDK != nil {
return headerObjSDK.ToV2().GetHeader()
}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index 5e04843f3..c114f02f6 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -2,6 +2,9 @@ package ape
import (
"context"
+ "encoding/hex"
+ "errors"
+ "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@@ -9,18 +12,19 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)
+var errFailedToCastToRequestContext = errors.New("failed to cast to RequestContext")
+
type Service struct {
apeChecker Checker
- extractor RequestInfoExtractor
-
next objectSvc.ServiceServer
}
@@ -60,10 +64,9 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
-func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
+func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service {
return &Service{
apeChecker: apeChecker,
- extractor: extractor,
next: next,
}
}
@@ -73,9 +76,17 @@ type getStreamBasicChecker struct {
apeChecker Checker
- metadata Metadata
+ namespace string
- reqInfo RequestInfo
+ senderKey []byte
+
+ containerOwner user.ID
+
+ role string
+
+ softAPECheck bool
+
+ bearerToken *bearer.Token
}
func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
@@ -86,17 +97,17 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
}
prm := Prm{
- Namespace: g.reqInfo.Namespace,
- Container: cnrID,
- Object: objID,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodGetObject,
- SenderKey: g.reqInfo.SenderKey,
- ContainerOwner: g.reqInfo.ContainerOwner,
- ContainerAttributes: g.reqInfo.ContainerAttributes,
- Role: g.reqInfo.Role,
- BearerToken: g.metadata.BearerToken,
- XHeaders: resp.GetMetaHeader().GetXHeaders(),
+ Namespace: g.namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodGetObject,
+ SenderKey: hex.EncodeToString(g.senderKey),
+ ContainerOwner: g.containerOwner,
+ Role: g.role,
+ SoftAPECheck: g.softAPECheck,
+ BearerToken: g.bearerToken,
+ XHeaders: resp.GetMetaHeader().GetXHeaders(),
}
if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil {
@@ -106,54 +117,66 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
return g.GetObjectStream.Send(resp)
}
+func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) {
+ untyped := ctx.Value(objectSvc.RequestContextKey)
+ if untyped == nil {
+ return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey)
+ }
+ rc, ok := untyped.(*objectSvc.RequestContext)
+ if !ok {
+ return nil, errFailedToCastToRequestContext
+ }
+ return rc, nil
+}
+
func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ reqCtx, err := requestContext(stream.Context())
if err != nil {
- return err
- }
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
- if err != nil {
- return err
+ return toStatusErr(err)
}
+
return c.next.Get(request, &getStreamBasicChecker{
GetObjectStream: stream,
apeChecker: c.apeChecker,
- metadata: md,
- reqInfo: reqInfo,
+ namespace: reqCtx.Namespace,
+ senderKey: reqCtx.SenderKey,
+ containerOwner: reqCtx.ContainerOwner,
+ role: nativeSchemaRole(reqCtx.Role),
+ softAPECheck: reqCtx.SoftAPECheck,
+ bearerToken: reqCtx.BearerToken,
})
}
type putStreamBasicChecker struct {
apeChecker Checker
- extractor RequestInfoExtractor
-
next objectSvc.PutObjectStream
}
func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
+ reqCtx, err := requestContext(ctx)
if err != nil {
- return err
+ return toStatusErr(err)
}
- reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
+
+ cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
if err != nil {
- return err
+ return toStatusErr(err)
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodPutObject,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- Role: reqInfo.Role,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -168,12 +191,11 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
- streamer, err := c.next.Put(ctx)
+func (c *Service) Put() (objectSvc.PutObjectStream, error) {
+ streamer, err := c.next.Put()
return &putStreamBasicChecker{
apeChecker: c.apeChecker,
- extractor: c.extractor,
next: streamer,
}, err
}
@@ -181,8 +203,6 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
type patchStreamBasicChecker struct {
apeChecker Checker
- extractor RequestInfoExtractor
-
next objectSvc.PatchObjectStream
nonFirstSend bool
@@ -192,26 +212,27 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa
if !p.nonFirstSend {
p.nonFirstSend = true
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ reqCtx, err := requestContext(ctx)
if err != nil {
- return err
+ return toStatusErr(err)
}
- reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
+
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return err
+ return toStatusErr(err)
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodPatchObject,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- Role: reqInfo.Role,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodPatchObject,
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -226,22 +247,22 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
- streamer, err := c.next.Patch(ctx)
+func (c *Service) Patch() (objectSvc.PatchObjectStream, error) {
+ streamer, err := c.next.Patch()
return &patchStreamBasicChecker{
apeChecker: c.apeChecker,
- extractor: c.extractor,
next: streamer,
}, err
}
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
@@ -255,7 +276,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
switch headerPart := resp.GetBody().GetHeaderPart().(type) {
case *objectV2.ShortHeader:
cidV2 := new(refs.ContainerID)
- md.Container.WriteToV2(cidV2)
+ cnrID.WriteToV2(cidV2)
header.SetContainerID(cidV2)
header.SetVersion(headerPart.GetVersion())
header.SetCreationEpoch(headerPart.GetCreationEpoch())
@@ -271,17 +292,17 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: header,
- Method: nativeschema.MethodHeadObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: header,
+ Method: nativeschema.MethodHeadObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -290,25 +311,28 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
- md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
- if err != nil {
- return err
+ var cnrID cid.ID
+ if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil {
+ if err := cnrID.ReadFromV2(*cnrV2); err != nil {
+ return toStatusErr(err)
+ }
}
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
+
+ reqCtx, err := requestContext(stream.Context())
if err != nil {
- return err
+ return toStatusErr(err)
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Method: nativeschema.MethodSearchObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Method: nativeschema.MethodSearchObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -318,26 +342,27 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc
}
func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodDeleteObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodDeleteObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -352,26 +377,27 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (
}
func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return err
+ return toStatusErr(err)
}
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
+
+ reqCtx, err := requestContext(stream.Context())
if err != nil {
- return err
+ return toStatusErr(err)
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodRangeObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodRangeObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -381,26 +407,27 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G
}
func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodHashObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodHashObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
resp, err := c.next.GetRangeHash(ctx, request)
@@ -415,27 +442,28 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa
}
func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: request.GetBody().GetObject().GetHeader(),
- Method: nativeschema.MethodPutObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: request.GetBody().GetObject().GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -445,36 +473,18 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ
return c.next.PutSingle(ctx, request)
}
-type request interface {
- GetMetaHeader() *session.RequestMetaHeader
- GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
- meta := request.GetMetaHeader()
- for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
- meta = origin
+func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
+ if cidV2 != nil {
+ if err = cnrID.ReadFromV2(*cidV2); err != nil {
+ return
+ }
}
- cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
- if err != nil {
- return
- }
- session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
- if err != nil {
- return
- }
- bearer, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return
- }
-
- md = Metadata{
- Container: cnrID,
- Object: objID,
- VerificationHeader: request.GetVerificationHeader(),
- SessionToken: session,
- BearerToken: bearer,
+ if objV2 != nil {
+ objID = new(oid.ID)
+ if err = objID.ReadFromV2(*objV2); err != nil {
+ return
+ }
}
return
}
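requestContext only reads the value; a sketch of the full contract it assumes, where the upstream ACL middleware stored it earlier in the pipeline (objectSvc names come from this file, the snippet is illustrative):

// upstream, in the ACL layer:
ctx = context.WithValue(ctx, objectSvc.RequestContextKey, reqCtx)
// downstream, in this service:
rc, err := requestContext(ctx)
if err != nil {
	return toStatusErr(err) // no value under the key, or a value of the wrong type
}
_ = rc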
diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go
index 97dbfa658..46e55360d 100644
--- a/pkg/services/object/ape/types.go
+++ b/pkg/services/object/ape/types.go
@@ -7,11 +7,3 @@ import "context"
type Checker interface {
CheckAPE(context.Context, Prm) error
}
-
-// InnerRingFetcher is an interface that must provide
-// Inner Ring information.
-type InnerRingFetcher interface {
- // InnerRingKeys must return list of public keys of
- // the actual inner ring.
- InnerRingKeys(ctx context.Context) ([][]byte, error)
-}
diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go
deleted file mode 100644
index 916bce427..000000000
--- a/pkg/services/object/ape/util_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package ape
-
-import (
- "slices"
- "testing"
-
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/stretchr/testify/require"
-)
-
-func TestIsVerbCompatible(t *testing.T) {
- table := map[string][]sessionSDK.ObjectVerb{
- nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
- nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
- nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet},
- nativeschema.MethodHeadObject: {
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectPatch,
- },
- nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
- nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash},
- nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
- nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch},
- }
-
- verbs := []sessionSDK.ObjectVerb{
- sessionSDK.VerbObjectPut,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectSearch,
- sessionSDK.VerbObjectPatch,
- }
-
- var tok sessionSDK.Object
-
- for op, list := range table {
- for _, verb := range verbs {
- contains := slices.Contains(list, verb)
-
- tok.ForVerb(verb)
-
- require.Equal(t, contains, assertVerb(tok, op),
- "%v in token, %s executing", verb, op)
- }
- }
-}
-
-func TestAssertSessionRelation(t *testing.T) {
- var tok sessionSDK.Object
- cnr := cidtest.ID()
- cnrOther := cidtest.ID()
- obj := oidtest.ID()
- objOther := oidtest.ID()
-
- // make sure ids differ, otherwise test won't work correctly
- require.False(t, cnrOther.Equals(cnr))
- require.False(t, objOther.Equals(obj))
-
- // bind session to the container (required)
- tok.BindContainer(cnr)
-
- // test container-global session
- require.NoError(t, assertSessionRelation(tok, cnr, nil))
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnrOther, nil))
- require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
-
- // limit the session to the particular object
- tok.LimitByObjects(obj)
-
- // test fixed object session (here obj arg must be non-nil everywhere)
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnr, &objOther))
-}
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index f8ee089fe..b42084634 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -37,7 +37,7 @@ func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return res, err
}
@@ -48,7 +48,7 @@ func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error
if !a.enabled.Load() {
return err
}
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -59,7 +59,7 @@ func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRan
if !a.enabled.Load() {
return err
}
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -70,7 +70,7 @@ func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHas
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
@@ -81,19 +81,19 @@ func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*obje
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
// Put implements ServiceServer.
-func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) {
- res, err := a.next.Put(ctx)
+func (a *auditService) Put() (PutObjectStream, error) {
+ res, err := a.next.Put()
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
return res, err
}
return &auditPutStream{
@@ -108,7 +108,7 @@ func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleReque
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(),
req.GetBody().GetObject().GetObjectID()),
err == nil)
@@ -121,7 +121,7 @@ func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) er
if !a.enabled.Load() {
return err
}
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return err
}
@@ -145,7 +145,7 @@ func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse,
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -163,8 +163,8 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error
if err != nil {
a.failed = true
}
- if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
@@ -183,13 +183,13 @@ type auditPatchStream struct {
nonFirstSend bool
}
-func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) {
- res, err := a.next.Patch(ctx)
+func (a *auditService) Patch() (PatchObjectStream, error) {
+ res, err := a.next.Patch()
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
return res, err
}
return &auditPatchStream{
@@ -205,7 +205,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -224,8 +224,8 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e
if err != nil {
a.failed = true
}
- if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
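Note on the audit hunks above: the service is a plain decorator; each method calls through to next, then emits an audit record only when auditing is toggled on. A minimal sketch of the pattern, with illustrative Service/realService types rather than the repository's actual interfaces:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
)

type Service interface {
	Delete(ctx context.Context, id string) error
}

type realService struct{}

func (realService) Delete(context.Context, string) error { return nil }

type auditService struct {
	next    Service
	enabled atomic.Bool
}

// Delete delegates first, then logs the outcome only if auditing is on,
// mirroring the enabled.Load() guard in the hunks above.
func (a *auditService) Delete(ctx context.Context, id string) error {
	err := a.next.Delete(ctx, id)
	if !a.enabled.Load() {
		return err
	}
	fmt.Printf("audit: op=Delete target=%s success=%t\n", id, err == nil)
	return err
}

func main() {
	a := &auditService{next: realService{}}
	a.enabled.Store(true)
	_ = a.Delete(context.Background(), "obj-1")
}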
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index ef65e78bc..758156607 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -40,20 +40,20 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
return x.nextHandler.Get(req, stream)
}
-func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
+func (x *Common) Put() (PutObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Put(ctx)
+ return x.nextHandler.Put()
}
-func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) {
+func (x *Common) Patch() (PatchObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Patch(ctx)
+ return x.nextHandler.Patch()
}
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
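Note: Common.Put and Common.Patch gate writes behind a maintenance check before delegating. A compact sketch of the guard, using made-up nodeState and handler types in place of the real ones:

package main

import (
	"context"
	"errors"
	"fmt"
)

type nodeState interface{ IsMaintenance() bool }

type always bool

func (m always) IsMaintenance() bool { return bool(m) }

type gate struct {
	state nodeState
	next  func(context.Context) error
}

// Put rejects writes up front while the node is under maintenance,
// as Common.Put and Common.Patch do above.
func (g *gate) Put(ctx context.Context) error {
	if g.state.IsMaintenance() {
		return errors.New("node under maintenance")
	}
	return g.next(ctx)
}

func main() {
	g := &gate{state: always(true), next: func(context.Context) error { return nil }}
	fmt.Println(g.Put(context.Background())) // node under maintenance
}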
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index f2bd907db..9e0f49297 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -1,7 +1,6 @@
package target
import (
- "context"
"errors"
"fmt"
@@ -14,20 +13,20 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
// prepare needed put parameters
- if err := preparePrm(ctx, &prm); err != nil {
+ if err := preparePrm(&prm); err != nil {
return nil, fmt.Errorf("could not prepare put parameters: %w", err)
}
if prm.Header.Signature() != nil {
- return newUntrustedTarget(ctx, &prm)
+ return newUntrustedTarget(&prm)
}
- return newTrustedTarget(ctx, &prm)
+ return newTrustedTarget(&prm)
}
-func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
+func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -49,9 +48,9 @@ func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transfor
}, nil
}
-func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
prm.Relay = nil // do not relay request without signature
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -89,8 +88,10 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme
if !ownerObj.Equals(ownerSession) {
return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
}
- } else if !ownerObj.Equals(sessionInfo.Owner) {
- return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
+ } else {
+ if !ownerObj.Equals(sessionInfo.Owner) {
+ return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
+ }
}
if prm.SignRequestPrivateKey == nil {
@@ -110,11 +111,11 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme
}, nil
}
-func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
+func preparePrm(prm *objectwriter.Params) error {
var err error
// get latest network map
- nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource)
+ nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
if err != nil {
return fmt.Errorf("could not get latest network map: %w", err)
}
@@ -125,7 +126,7 @@ func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
}
// get container to store the object
- cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr)
+ cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
if err != nil {
return fmt.Errorf("could not get container by ID: %w", err)
}
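Note: target.New dispatches on whether the header already carries a signature. Signed objects take the untrusted path and are relayed as-is; unsigned ones take the trusted path, where the node signs on behalf of the session and relaying is disabled. A condensed sketch with illustrative types:

package main

import (
	"errors"
	"fmt"
)

type target string

// newTarget condenses the dispatch in target.New: a present signature
// selects the untrusted path, otherwise the trusted path is taken.
func newTarget(signed bool, maxObjectSize uint64) (target, error) {
	if maxObjectSize == 0 {
		return "", errors.New("could not obtain max object size parameter")
	}
	if signed {
		return "untrusted", nil
	}
	return "trusted", nil
}

func main() {
	t, err := newTarget(true, 64<<20)
	fmt.Println(t, err) // untrusted <nil>
}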
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
index 6593d3ca0..6689557ee 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
}
func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
- traverser, err := placement.NewTraverser(ctx, n.Opts...)
+ traverser, err := placement.NewTraverser(n.Traversal.Opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
@@ -56,10 +56,10 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
}
// perform additional container broadcast if needed
- if n.submitPrimaryPlacementFinish() {
+ if n.Traversal.submitPrimaryPlacementFinish() {
err := n.ForEachNode(ctx, f)
if err != nil {
- n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -79,29 +79,33 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
continue
}
- isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey())
+ workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
item := new(bool)
wg.Add(1)
- go func() {
+ if err := workerPool.Submit(func() {
defer wg.Done()
err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
if err != nil {
resErr.Store(err)
- svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err)
+ svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
- }()
+ }); err != nil {
+ wg.Done()
+ svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err)
+ return true
+ }
// Mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
- n.submitProcessed(addr, item)
+ n.Traversal.submitProcessed(addr, item)
}
wg.Wait()
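Note: the ForEachNode hunk replaces a bare goroutine with a worker-pool Submit. The subtlety is the rejection path: wg.Add(1) runs before Submit, so a failed submission must call wg.Done() itself or wg.Wait() deadlocks. A self-contained sketch of the pattern, assuming the ants pool that ec_test.go wires in:

package main

import (
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// Nonblocking: Submit returns an error instead of waiting when the
	// pool is saturated, which is exactly the path that must call wg.Done().
	pool, _ := ants.NewPool(2, ants.WithNonblocking(true))
	defer pool.Release()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		i := i
		wg.Add(1)
		if err := pool.Submit(func() {
			defer wg.Done()
			fmt.Println("processing node", i)
		}); err != nil {
			wg.Done() // balance the Add when the pool rejects the task
			fmt.Println("pool rejected task:", err)
		}
	}
	wg.Wait()
}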
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
index fff58aca7..f7486eae7 100644
--- a/pkg/services/object/common/writer/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -95,10 +95,6 @@ func (x errIncompletePut) Error() string {
return commonMsg
}
-func (x errIncompletePut) Unwrap() error {
- return x.singleErr
-}
-
// WriteObject implements the transformer.ObjectWriter interface.
func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
t.obj = obj
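Note: removing Unwrap from errIncompletePut is observable behavior, not cleanup; without it, errors.Is and errors.As can no longer reach the wrapped singleErr. A tiny standalone illustration (errDisk and incompletePut are made up):

package main

import (
	"errors"
	"fmt"
)

var errDisk = errors.New("disk failure")

type incompletePut struct{ single error }

func (e incompletePut) Error() string { return "incomplete object PUT" }

// Restoring an Unwrap method would make errors.Is(err, errDisk) true again:
// func (e incompletePut) Unwrap() error { return e.single }

func main() {
	var err error = incompletePut{single: errDisk}
	fmt.Println(errors.Is(err, errDisk)) // false without Unwrap
}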
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 26a53e315..571bae7bb 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -14,7 +14,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -85,7 +84,7 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
}
func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) {
- currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx)
+ currentNodeIsContainerNode, err := e.currentNodeIsContainerNode()
if err != nil {
return false, false, err
}
@@ -108,8 +107,8 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O
return true, currentNodeIsContainerNode, nil
}
-func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) {
- t, err := placement.NewTraverser(ctx, e.PlacementOpts...)
+func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
+ t, err := placement.NewTraverser(e.PlacementOpts...)
if err != nil {
return false, err
}
@@ -128,7 +127,7 @@ func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error)
}
func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -149,11 +148,21 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
- err = e.Relay(ctx, info, c)
+ completed := make(chan interface{})
+ if poolErr := e.Config.RemotePool.Submit(func() {
+ defer close(completed)
+ err = e.Relay(ctx, info, c)
+ }); poolErr != nil {
+ close(completed)
+ svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr)
+ return poolErr
+ }
+ <-completed
+
if err == nil {
return nil
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+ e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
lastErr = err
}
}
@@ -170,7 +179,7 @@ func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error
return e.writePartLocal(ctx, obj)
}
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
if err != nil {
return err
}
@@ -207,7 +216,7 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
partsProcessed := make([]atomic.Bool, len(parts))
objID, _ := obj.ID()
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -265,10 +274,8 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
err := e.putECPartToNode(ctx, obj, node)
if err == nil {
return nil
- } else if clientSDK.IsErrObjectAlreadyRemoved(err) {
- return err
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
@@ -292,7 +299,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -316,7 +323,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -333,11 +340,21 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n
}
func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
+ var err error
localTarget := LocalTarget{
Storage: e.Config.LocalStore,
Container: e.Container,
}
- return localTarget.WriteObject(ctx, obj, e.ObjectMeta)
+ completed := make(chan interface{})
+ if poolErr := e.Config.LocalPool.Submit(func() {
+ defer close(completed)
+ err = localTarget.WriteObject(ctx, obj, e.ObjectMeta)
+ }); poolErr != nil {
+ close(completed)
+ return poolErr
+ }
+ <-completed
+ return err
}
func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
@@ -351,5 +368,15 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n
nodeInfo: clientNodeInfo,
}
- return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
+ var err error
+ completed := make(chan interface{})
+ if poolErr := e.Config.RemotePool.Submit(func() {
+ defer close(completed)
+ err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
+ }); poolErr != nil {
+ close(completed)
+ return poolErr
+ }
+ <-completed
+ return err
}
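Note: the ec.go hunks keep each pooled write synchronous by closing a completed channel from inside the submitted task and blocking on it in the caller. The same pattern extracted into a helper, as a sketch; WorkerPool stands in for the minimal Submit interface used here:

package main

import "fmt"

type WorkerPool interface{ Submit(func()) error }

// runSync executes fn on the pool but returns only after it has finished,
// mirroring the completed-channel pattern in ECWriter above.
func runSync(pool WorkerPool, fn func() error) error {
	var err error
	completed := make(chan interface{})
	if poolErr := pool.Submit(func() {
		defer close(completed)
		err = fn()
	}); poolErr != nil {
		return poolErr
	}
	<-completed
	return err
}

type inline struct{}

func (inline) Submit(f func()) error { f(); return nil }

func main() {
	err := runSync(inline{}, func() error { fmt.Println("writing part"); return nil })
	fmt.Println("err:", err)
}

An empty struct channel (chan struct{}) would convey the same signal without an element type, but the sketch keeps chan interface{} to match the hunk.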
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
index d5eeddf21..8b2599e5f 100644
--- a/pkg/services/object/common/writer/ec_test.go
+++ b/pkg/services/object/common/writer/ec_test.go
@@ -7,7 +7,6 @@ import (
"crypto/sha256"
"errors"
"fmt"
- "slices"
"strconv"
"testing"
@@ -31,6 +30,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
)
@@ -38,10 +38,11 @@ type testPlacementBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
+func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
[][]netmap.NodeInfo, error,
) {
- arr := slices.Clone(p.vectors[0])
+ arr := make([]netmap.NodeInfo, len(p.vectors[0]))
+ copy(arr, p.vectors[0])
return [][]netmap.NodeInfo{arr}, nil
}
@@ -130,13 +131,17 @@ func TestECWriter(t *testing.T) {
nodeKey, err := keys.NewPrivateKey()
require.NoError(t, err)
- log, err := logger.NewLogger(logger.Prm{})
+ pool, err := ants.NewPool(4, ants.WithNonblocking(true))
+ require.NoError(t, err)
+
+ log, err := logger.NewLogger(nil)
require.NoError(t, err)
var n nmKeys
ecw := ECWriter{
Config: &Config{
NetmapKeys: n,
+ RemotePool: pool,
Logger: log,
ClientConstructor: clientConstructor{vectors: ns},
KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
index d3d2b41b4..0e4c4d9c6 100644
--- a/pkg/services/object/common/writer/writer.go
+++ b/pkg/services/object/common/writer/writer.go
@@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -23,7 +24,7 @@ type MaxSizeSource interface {
// of physically stored object in system.
//
// Must return 0 if value can not be obtained.
- MaxObjectSize(context.Context) uint64
+ MaxObjectSize() uint64
}
type ClientConstructor interface {
@@ -31,7 +32,7 @@ type ClientConstructor interface {
}
type InnerRing interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
+ InnerRingKeys() ([][]byte, error)
}
type FormatValidatorConfig interface {
@@ -51,6 +52,8 @@ type Config struct {
NetmapSource netmap.Source
+ RemotePool, LocalPool util.WorkerPool
+
NetmapKeys netmap.AnnouncedKeys
FormatValidator *object.FormatValidator
@@ -66,6 +69,12 @@ type Config struct {
type Option func(*Config)
+func WithWorkerPools(remote, local util.WorkerPool) Option {
+ return func(c *Config) {
+ c.RemotePool, c.LocalPool = remote, local
+ }
+}
+
func WithLogger(l *logger.Logger) Option {
return func(c *Config) {
c.Logger = l
@@ -78,6 +87,13 @@ func WithVerifySessionTokenIssuer(v bool) Option {
}
}
+func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) {
+ if c.NetmapKeys.IsLocalKey(pub) {
+ return c.LocalPool, true
+ }
+ return c.RemotePool, false
+}
+
type Params struct {
Config *Config
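Note: getWorkerPool routes each placement target to one of two pools depending on whether the public key belongs to the local node, so local writes and remote relays are throttled independently. A compact sketch with illustrative types:

package main

import (
	"bytes"
	"fmt"
)

type workerPool interface{ Submit(func()) error }

type pool string

func (pool) Submit(f func()) error { f(); return nil }

type config struct {
	localKey              []byte
	localPool, remotePool workerPool
}

// getWorkerPool returns the pool to use plus whether the target is the
// local node, matching the (pool, isLocal) pair returned in writer.go.
func (c *config) getWorkerPool(pub []byte) (workerPool, bool) {
	if bytes.Equal(pub, c.localKey) {
		return c.localPool, true
	}
	return c.remotePool, false
}

func main() {
	c := &config{localKey: []byte{1}, localPool: pool("local"), remotePool: pool("remote")}
	p, isLocal := c.getWorkerPool([]byte{2})
	fmt.Println(p, isLocal) // remote false
}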
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 57e33fde7..88454625d 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ServingRequest)
+ exec.log.Debug(logs.ServingRequest)
if err := exec.executeLocal(ctx); err != nil {
- exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
return err
}
- exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
return nil
}
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index a99ba3586..ec771320e 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "slices"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -35,13 +34,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = l.With(
+ exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "DELETE"),
zap.Stringer("address", exec.address()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )
+ )}
}
func (exec *execCtx) isLocal() bool {
@@ -84,16 +83,16 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
exec.splitInfo = errSplitInfo.SplitInfo()
exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
- exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
+ exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
if err := exec.collectMembers(ctx); err != nil {
return err
}
- exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected)
+ exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
return nil
case errors.As(err, &errECInfo):
- exec.log.Debug(ctx, logs.DeleteECObjectReceived)
+ exec.log.Debug(logs.DeleteECObjectReceived)
return nil
}
@@ -109,7 +108,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
func (exec *execCtx) collectMembers(ctx context.Context) error {
if exec.splitInfo == nil {
- exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY)
+ exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
return nil
}
@@ -132,7 +131,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error {
func (exec *execCtx) collectChain(ctx context.Context) error {
var chain []oid.ID
- exec.log.Debug(ctx, logs.DeleteAssemblingChain)
+ exec.log.Debug(logs.DeleteAssemblingChain)
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
@@ -153,7 +152,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error {
}
func (exec *execCtx) collectChildren(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteCollectingChildren)
+ exec.log.Debug(logs.DeleteCollectingChildren)
children, err := exec.svc.header.children(ctx, exec)
if err != nil {
@@ -166,7 +165,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error {
}
func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteSupplementBySplitID)
+ exec.log.Debug(logs.DeleteSupplementBySplitID)
chain, err := exec.svc.searcher.splitMembers(ctx, exec)
if err != nil {
@@ -183,7 +182,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
for i := range members {
for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
if members[i].Equals(incoming[j]) {
- incoming = slices.Delete(incoming, j, j+1)
+ incoming = append(incoming[:j], incoming[j+1:]...)
j--
}
}
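Note: the addMembers hunk swaps slices.Delete for the classic append(incoming[:j], incoming[j+1:]...) idiom; both drop element j in place, and the j-- keeps the index valid after the shift. A standalone illustration:

package main

import "fmt"

func main() {
	members := []string{"a", "b"}
	incoming := []string{"b", "c", "b"}

	for i := range members {
		for j := 0; j < len(incoming); j++ { // don't use range: the slice mutates in the body
			if members[i] == incoming[j] {
				incoming = append(incoming[:j], incoming[j+1:]...)
				j-- // re-examine index j, which now holds the next element
			}
		}
	}
	fmt.Println(incoming) // [c]
}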
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 01b2d9b3f..2c3c47f49 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -10,13 +10,13 @@ import (
)
func (exec *execCtx) executeLocal(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure)
+ exec.log.Debug(logs.DeleteFormingTombstoneStructure)
if err := exec.formTombstone(ctx); err != nil {
return err
}
- exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
+ exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
return exec.saveTombstone(ctx)
}
@@ -33,7 +33,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) error {
)
exec.addMembers([]oid.ID{exec.address().Object()})
- exec.log.Debug(ctx, logs.DeleteFormingSplitInfo)
+ exec.log.Debug(logs.DeleteFormingSplitInfo)
if err := exec.formExtendedInfo(ctx); err != nil {
return fmt.Errorf("form extended info: %w", err)
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index 1c4d7d585..e4f7a8c50 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -72,7 +72,7 @@ func New(gs *getsvc.Service,
opts ...Option,
) *Service {
c := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
header: &headSvcWrapper{s: gs},
searcher: &searchSvcWrapper{s: ss},
placer: &putSvcWrapper{s: ps},
@@ -92,6 +92,6 @@ func New(gs *getsvc.Service,
// WithLogger returns option to specify Delete service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))}
}
}
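Note: WithLogger in the Delete and Get services now tags the injected logger with a component field, so every record is attributable to its service. A minimal functional-options sketch with zap (cfg and Option here are illustrative):

package main

import "go.uber.org/zap"

type cfg struct{ log *zap.Logger }

type Option func(*cfg)

// WithLogger tags the injected logger with a component field, as the
// Delete and Get service options do above.
func WithLogger(l *zap.Logger) Option {
	return func(c *cfg) {
		c.log = l.With(zap.String("component", "Delete service"))
	}
}

func main() {
	c := &cfg{log: zap.L()}
	WithLogger(zap.NewExample())(c)
	c.log.Info("tombstone saved")
}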
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index e80132489..9f17f1e4c 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -13,7 +13,7 @@ import (
func (r *request) assemble(ctx context.Context) {
if !r.canAssembleComplexObject() {
- r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
+ r.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -35,23 +35,23 @@ func (r *request) assemble(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(ctx, logs.GetTryingToAssembleTheObject)
+ r.log.Debug(logs.GetTryingToAssembleTheObject)
r.prm.common = r.prm.common.WithLocalOnly(false)
assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
- r.log.Debug(ctx, logs.GetAssemblingSplittedObject,
+ r.log.Debug(logs.GetAssemblingSplittedObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted,
+ defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil {
- r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject,
+ r.log.Warn(logs.GetFailedToAssembleSplittedObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
@@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
detachedExecutor.execute(ctx)
- return detachedExecutor.err
+ return detachedExecutor.statusError.err
}
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index 59dd7fd93..03f913bbf 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -12,7 +12,7 @@ import (
func (r *request) assembleEC(ctx context.Context) {
if r.isRaw() {
- r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
+ r.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -34,10 +34,10 @@ func (r *request) assembleEC(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
+ r.log.Debug(logs.GetTryingToAssembleTheECObject)
// initialize epoch number
- ok := r.initEpoch(ctx)
+ ok := r.initEpoch()
if !ok {
return
}
@@ -45,18 +45,18 @@ func (r *request) assembleEC(ctx context.Context) {
r.prm.common = r.prm.common.WithLocalOnly(false)
assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
- r.log.Debug(ctx, logs.GetAssemblingECObject,
+ r.log.Debug(logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted,
+ defer r.log.Debug(logs.GetAssemblingECObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) {
- r.log.Warn(ctx, logs.GetFailedToAssembleECObject,
+ r.log.Warn(logs.GetFailedToAssembleECObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go
index b24c9417b..ff3f90bf2 100644
--- a/pkg/services/object/get/assembler.go
+++ b/pkg/services/object/get/assembler.go
@@ -2,7 +2,6 @@ package getsvc
import (
"context"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -60,24 +59,53 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS
if previousID == nil && len(childrenIDs) == 0 {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
}
-
if len(childrenIDs) > 0 {
- if a.rng != nil {
- err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer)
- } else {
- err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer)
+ if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil {
+ return nil, err
}
} else {
- if a.rng != nil {
- err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer)
- } else {
- err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer)
+ if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil {
+ return nil, err
}
}
+ return a.parentObject, nil
+}
+
+func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ var sourceObjectIDs []oid.ID
+ sourceObjectID, ok := a.splitInfo.Link()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ sourceObjectID, ok = a.splitInfo.LastPart()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ if len(sourceObjectIDs) == 0 {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ for _, sourceObjectID = range sourceObjectIDs {
+ obj, err := a.getParent(ctx, sourceObjectID, writer)
+ if err == nil {
+ return obj, nil
+ }
+ }
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+}
+
+func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
if err != nil {
return nil, err
}
- return a.parentObject, nil
+ parent := obj.Parent()
+ if parent == nil {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ if err := writer.WriteHeader(ctx, parent); err != nil {
+ return nil, err
+ }
+ return obj, nil
}
func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) {
@@ -162,16 +190,26 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD
}
func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ if a.rng == nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
+ }
+ return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true)
+ }
+
+ if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
return err
}
- return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true)
+ return writer.WriteChunk(ctx, a.parentObject.Payload())
}
func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
+ if a.rng == nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
+ }
}
+
if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil {
return err
}
@@ -181,9 +219,16 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev
return nil
}
-func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error {
+func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error {
+ withRng := len(partRanges) > 0 && a.rng != nil
+
for i := range partIDs {
- _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer)
+ var r *objectSDK.Range
+ if withRng {
+ r = &partRanges[i]
+ }
+
+ _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer)
if err != nil {
return err
}
@@ -192,13 +237,22 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec
}
func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, err := a.buildChain(ctx, prevID)
+ chain, rngs, err := a.buildChain(ctx, prevID)
if err != nil {
return err
}
- slices.Reverse(chain)
- return a.assemblePayloadByObjectIDs(ctx, writer, chain, false)
+ reverseRngs := len(rngs) > 0
+
+ for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 {
+ chain[left], chain[right] = chain[right], chain[left]
+
+ if reverseRngs {
+ rngs[left], rngs[right] = rngs[right], rngs[left]
+ }
+ }
+
+ return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false)
}
func (a *assembler) isChild(obj *objectSDK.Object) bool {
@@ -206,28 +260,63 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool {
return parent == nil || equalAddresses(a.addr, object.AddressOf(parent))
}
-func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) {
+func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
var (
chain []oid.ID
+ rngs []objectSDK.Range
+ from = a.rng.GetOffset()
+ to = from + a.rng.GetLength()
hasPrev = true
)
// fill the chain end-to-start
for hasPrev {
- head, err := a.objGetter.HeadObject(ctx, prevID)
- if err != nil {
- return nil, err
- }
- if !a.isChild(head) {
- return nil, errParentAddressDiffers
+		// this early break applies only to "range" requests;
+		// a plain GET terminates when hasPrev becomes false
+ if a.rng != nil && a.currentOffset <= from {
+ break
}
- id, _ := head.ID()
- chain = append(chain, id)
+ head, err := a.objGetter.HeadObject(ctx, prevID)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !a.isChild(head) {
+ return nil, nil, errParentAddressDiffers
+ }
+
+ if a.rng != nil {
+ sz := head.PayloadSize()
+
+ a.currentOffset -= sz
+
+ if a.currentOffset < to {
+ off := uint64(0)
+ if from > a.currentOffset {
+ off = from - a.currentOffset
+ sz -= from - a.currentOffset
+ }
+
+ if to < a.currentOffset+off+sz {
+ sz = to - off - a.currentOffset
+ }
+
+ index := len(rngs)
+ rngs = append(rngs, objectSDK.Range{})
+ rngs[index].SetOffset(off)
+ rngs[index].SetLength(sz)
+
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
+ } else {
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
prevID, hasPrev = head.PreviousID()
}
- return chain, nil
+ return chain, rngs, nil
}
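Note: the new buildChain merges range handling into the reverse walk. Starting from the tail, currentOffset is decremented by each part's payload size, any part overlapping the requested [from, to) window contributes a clamped sub-range, and the walk stops once currentOffset <= from. A worked sketch with made-up sizes (three 10-byte parts, range [12, 25): part 3 covers [20, 30) and is clipped to [20, 25), part 2 covers [10, 20) and is clipped to [12, 20), part 1 is skipped):

package main

import "fmt"

type rng struct{ off, ln uint64 }

// clampChain mirrors the reverse walk in buildChain: parts are visited
// tail-first, current tracks the absolute start of the part just visited,
// and each overlapping part contributes a part-relative sub-range.
func clampChain(sizes []uint64, from, to uint64) []rng {
	var out []rng
	current := uint64(0)
	for _, s := range sizes {
		current += s // start at the full payload size
	}
	for i := len(sizes) - 1; i >= 0 && current > from; i-- {
		sz := sizes[i]
		current -= sz
		if current < to {
			off := uint64(0)
			if from > current {
				off = from - current
				sz -= from - current
			}
			if to < current+off+sz {
				sz = to - off - current
			}
			out = append(out, rng{off: off, ln: sz})
		}
	}
	return out
}

func main() {
	fmt.Println(clampChain([]uint64{10, 10, 10}, 12, 25))
	// tail-first: [{0 5} {2 8}] -> part 3 bytes [0,5), part 2 bytes [2,10)
}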
diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go
deleted file mode 100644
index ff213cb82..000000000
--- a/pkg/services/object/get/assembler_head.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package getsvc
-
-import (
- "context"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- var sourceObjectIDs []oid.ID
- sourceObjectID, ok := a.splitInfo.Link()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- sourceObjectID, ok = a.splitInfo.LastPart()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- if len(sourceObjectIDs) == 0 {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- for _, sourceObjectID = range sourceObjectIDs {
- obj, err := a.getParent(ctx, sourceObjectID, writer)
- if err == nil {
- return obj, nil
- }
- }
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
-}
-
-func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
- if err != nil {
- return nil, err
- }
- parent := obj.Parent()
- if parent == nil {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- if err := writer.WriteHeader(ctx, parent); err != nil {
- return nil, err
- }
- return obj, nil
-}
diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go
deleted file mode 100644
index 780693c40..000000000
--- a/pkg/services/object/get/assembler_range.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package getsvc
-
-import (
- "context"
- "slices"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
- return err
- }
- return writer.WriteChunk(ctx, a.parentObject.Payload())
-}
-
-func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil {
- return err
- }
- if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part
- return err
- }
- return nil
-}
-
-func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error {
- for i := range partIDs {
- _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, rngs, err := a.buildChainRange(ctx, prevID)
- if err != nil {
- return err
- }
-
- slices.Reverse(chain)
- slices.Reverse(rngs)
- return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs)
-}
-
-func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
- var (
- chain []oid.ID
- rngs []objectSDK.Range
- from = a.rng.GetOffset()
- to = from + a.rng.GetLength()
-
- hasPrev = true
- )
-
- // fill the chain end-to-start
- for hasPrev && from < a.currentOffset {
- head, err := a.objGetter.HeadObject(ctx, prevID)
- if err != nil {
- return nil, nil, err
- }
- if !a.isChild(head) {
- return nil, nil, errParentAddressDiffers
- }
-
- nextOffset := a.currentOffset - head.PayloadSize()
- clampedFrom := max(from, nextOffset)
- clampedTo := min(to, a.currentOffset)
- if clampedFrom < clampedTo {
- index := len(rngs)
- rngs = append(rngs, objectSDK.Range{})
- rngs[index].SetOffset(clampedFrom - nextOffset)
- rngs[index].SetLength(clampedTo - clampedFrom)
-
- id, _ := head.ID()
- chain = append(chain, id)
- }
-
- a.currentOffset = nextOffset
- prevID, hasPrev = head.PreviousID()
- }
-
- return chain, rngs, nil
-}
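Note: the deleted buildChainRange expressed the same clamp more directly. For a part occupying [nextOffset, currentOffset) of the parent payload, its contribution is [max(from, nextOffset), min(to, currentOffset)), converted to a part-relative offset. A one-function sketch of that formulation, relying on the Go 1.21 min/max builtins the original file also used:

package main

import "fmt"

// partRange clamps the requested [from, to) window against a part that
// occupies [nextOffset, currentOffset) of the parent payload. The bool
// reports whether the part overlaps the window at all.
func partRange(from, to, nextOffset, currentOffset uint64) (off, ln uint64, ok bool) {
	clampedFrom := max(from, nextOffset)
	clampedTo := min(to, currentOffset)
	if clampedFrom >= clampedTo {
		return 0, 0, false
	}
	return clampedFrom - nextOffset, clampedTo - clampedFrom, true
}

func main() {
	// part covering [10,20) of the payload, request [12,25)
	fmt.Println(partRange(12, 25, 10, 20)) // 2 8 true
}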
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index e0a7e1da6..44d9af3a2 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -125,7 +125,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter
func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) {
objID := a.addr.Object()
- trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch)
+ trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch)
if err != nil {
return nil, err
}
@@ -155,7 +155,7 @@ func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Travers
parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount)
if err != nil {
- a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err))
+ a.log.Debug(logs.GetUnableToGetAllPartsECObject, zap.Error(err))
}
return parts
}
@@ -229,7 +229,7 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -238,13 +238,15 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var object *objectSDK.Object
if a.head {
object, err = a.localStorage.Head(ctx, addr, false)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
} else {
object, err = a.localStorage.Get(ctx, addr)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
}
return object
@@ -257,11 +259,11 @@ func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.N
var errECInfo *objectSDK.ECInfoError
_, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true)
if err == nil {
- a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
+ a.log.Error(logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
return nil
}
if !errors.As(err, &errECInfo) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
+ a.log.Warn(logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
return nil
}
result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks))
@@ -275,7 +277,7 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -284,13 +286,15 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var object *objectSDK.Object
if a.head {
object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
} else {
object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
}
return object
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index dfb31133c..034768c81 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -10,25 +10,34 @@ import (
func (r *request) executeOnContainer(ctx context.Context) {
if r.isLocal() {
- r.log.Debug(ctx, logs.GetReturnResultDirectly)
+ r.log.Debug(logs.GetReturnResultDirectly)
return
}
lookupDepth := r.netmapLookupDepth()
- r.log.Debug(ctx, logs.TryingToExecuteInContainer,
+ r.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- ok := r.initEpoch(ctx)
+ ok := r.initEpoch()
if !ok {
return
}
localStatus := r.status
- for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 {
+ for {
+ if r.processCurrentEpoch(ctx, localStatus) {
+ break
+ }
+
+		// check whether the maximum depth has been reached
+ if lookupDepth == 0 {
+ break
+ }
+
lookupDepth--
// go to the previous epoch
@@ -37,11 +46,11 @@ func (r *request) executeOnContainer(ctx context.Context) {
}
func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
- r.log.Debug(ctx, logs.ProcessEpoch,
+ r.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", r.curProcEpoch),
)
- traverser, ok := r.generateTraverser(ctx, r.address())
+ traverser, ok := r.generateTraverser(r.address())
if !ok {
return true
}
@@ -58,7 +67,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
+ r.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
return false
}
@@ -66,7 +75,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
for i := range addrs {
select {
case <-ctx.Done():
- r.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
+ r.log.Debug(logs.InterruptPlacementIterationByContext,
zap.Error(ctx.Err()),
)
@@ -82,7 +91,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
client.NodeInfoFromNetmapElement(&info, addrs[i])
if r.processNode(ctx, info) {
- r.log.Debug(ctx, logs.GetCompletingTheOperation)
+ r.log.Debug(logs.GetCompletingTheOperation)
return true
}
}
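Note: executeOnContainer retries the placement walk in progressively older epochs until either the request completes or lookupDepth is exhausted. Schematically (processEpoch is an illustrative callback):

package main

import "fmt"

// searchBack walks back from the current epoch at most depth times,
// mirroring the lookupDepth loop in container.go above.
func searchBack(epoch, depth uint64, processEpoch func(uint64) bool) {
	for {
		if processEpoch(epoch) {
			return // operation completed
		}
		if depth == 0 {
			return // maximum lookup depth reached
		}
		depth--
		epoch-- // go to the previous epoch
	}
}

func main() {
	searchBack(100, 2, func(e uint64) bool {
		fmt.Println("processing epoch", e)
		return e == 99
	})
}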
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 3a50308c2..03b7f8bf2 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -87,51 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
exec.execute(ctx)
- return exec.err
+ return exec.statusError.err
}
-func (r *request) execute(ctx context.Context) {
- r.log.Debug(ctx, logs.ServingRequest)
+func (exec *request) execute(ctx context.Context) {
+ exec.log.Debug(logs.ServingRequest)
// perform local operation
- r.executeLocal(ctx)
+ exec.executeLocal(ctx)
- r.analyzeStatus(ctx, true)
+ exec.analyzeStatus(ctx, true)
}
-func (r *request) analyzeStatus(ctx context.Context, execCnr bool) {
+func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
- switch r.status {
+ switch exec.status {
case statusOK:
- r.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
case statusINHUMED:
- r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved)
+ exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
- r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual)
- r.assemble(ctx)
+ exec.log.Debug(logs.GetRequestedObjectIsVirtual)
+ exec.assemble(ctx)
case statusOutOfRange:
- r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds)
+ exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
case statusEC:
- r.log.Debug(ctx, logs.GetRequestedObjectIsEC)
- if r.isRaw() && execCnr {
- r.executeOnContainer(ctx)
- r.analyzeStatus(ctx, false)
+ exec.log.Debug(logs.GetRequestedObjectIsEC)
+ if exec.isRaw() && execCnr {
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
}
- r.assembleEC(ctx)
+ exec.assembleEC(ctx)
default:
- r.log.Debug(ctx, logs.OperationFinishedWithError,
- zap.Error(r.err),
+ exec.log.Debug(logs.OperationFinishedWithError,
+ zap.Error(exec.err),
)
var errAccessDenied *apistatus.ObjectAccessDenied
- if execCnr && errors.As(r.err, &errAccessDenied) {
+ if execCnr && errors.As(exec.err, &errAccessDenied) {
// Local get can't return access denied error, so this error was returned by
// write to the output stream. So there is no need to try to find object on other nodes.
return
}
if execCnr {
- r.executeOnContainer(ctx)
- r.analyzeStatus(ctx, false)
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
}
}
}
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 3efc72065..6827018dc 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -63,7 +63,7 @@ type testClient struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
+func (e testEpochReceiver) Epoch() (uint64, error) {
return uint64(e), nil
}
@@ -79,7 +79,7 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
+func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
opts := make([]placement.Option, 0, 4)
opts = append(opts,
placement.ForContainer(g.c),
@@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.
opts = append(opts, placement.ForObject(*obj))
}
- t, err := placement.NewTraverser(context.Background(), opts...)
+ t, err := placement.NewTraverser(opts...)
return t, &containerCore.Container{
Value: g.c,
}, err
}
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
index 83ef54744..599a6f176 100644
--- a/pkg/services/object/get/getrangeec_test.go
+++ b/pkg/services/object/get/getrangeec_test.go
@@ -28,14 +28,14 @@ type containerStorage struct {
cnt *container.Container
}
-func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) {
+func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) {
coreCnt := coreContainer.Container{
Value: *cs.cnt,
}
return &coreCnt, nil
}
-func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
+func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) {
return nil, nil
}
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index cfabb082f..1cd5e549c 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -31,7 +31,7 @@ func (r *request) executeLocal(ctx context.Context) {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err))
+ r.log.Debug(logs.GetLocalGetFailed, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 78ca5b5e3..f2639f8e6 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -18,9 +18,9 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
defer span.End()
- r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
+ r.log.Debug(logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
- rs, ok := r.getRemoteStorage(ctx, info)
+ rs, ok := r.getRemoteStorage(info)
if !ok {
return true
}
@@ -35,7 +35,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
switch {
default:
- r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err))
+ r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err))
if r.status != statusEC {
// for raw requests, continue to collect other parts
r.status = statusUndefined
diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go
index 2c64244cf..0df67dec9 100644
--- a/pkg/services/object/get/remote_getter.go
+++ b/pkg/services/object/get/remote_getter.go
@@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob
if err != nil {
return nil, err
}
- epoch, err := g.es.Epoch(ctx)
+ epoch, err := g.es.Epoch()
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index 268080486..1a7a43a35 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -47,14 +47,14 @@ func (r *request) setLogger(l *logger.Logger) {
req = "GET_RANGE"
}
- r.log = l.With(
+ r.log = &logger.Logger{Logger: l.With(
zap.String("request", req),
zap.Stringer("address", r.address()),
zap.Bool("raw", r.isRaw()),
zap.Bool("local", r.isLocal()),
zap.Bool("with session", r.prm.common.SessionToken() != nil),
zap.Bool("with bearer", r.prm.common.BearerToken() != nil),
- )
+ )}
}
func (r *request) isLocal() bool {
@@ -116,20 +116,20 @@ func (r *request) netmapLookupDepth() uint64 {
return r.prm.common.NetmapLookupDepth()
}
-func (r *request) initEpoch(ctx context.Context) bool {
+func (r *request) initEpoch() bool {
r.curProcEpoch = r.netmapEpoch()
if r.curProcEpoch > 0 {
return true
}
- e, err := r.epochSource.Epoch(ctx)
+ e, err := r.epochSource.Epoch()
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+ r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
return false
case err == nil:
@@ -138,17 +138,17 @@ func (r *request) initEpoch(ctx context.Context) bool {
}
}
-func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) {
+func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) {
obj := addr.Object()
- t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch)
+ t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch)
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
return nil, false
case err == nil:
@@ -156,13 +156,13 @@ func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*pla
}
}
-func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) {
+func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) {
rs, err := r.remoteStorageConstructor.Get(info)
if err != nil {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient)
+ r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient)
return nil, false
}
@@ -185,7 +185,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
@@ -206,7 +206,7 @@ func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object)
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
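The setLogger hunk above only works if logger.Logger is a struct that embeds *zap.Logger rather than an alias, so the fielded logger returned by l.With must be re-wrapped by hand. A minimal sketch of that wrapper pattern (the Logger type here is a stand-in, not the actual frostfs-node definition):

package main

import "go.uber.org/zap"

// Logger is a thin wrapper embedding *zap.Logger, mirroring the assumed shape
// of logger.Logger in the diff above.
type Logger struct {
	*zap.Logger
}

// withRequestFields re-wraps the result of With, as setLogger does above.
func withRequestFields(l *Logger, req string) *Logger {
	return &Logger{Logger: l.With(zap.String("request", req))}
}

func main() {
	base := &Logger{Logger: zap.NewExample()}
	withRequestFields(base, "GET").Debug("serving request")
}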
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index a103f5a7f..3413abeb7 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -34,7 +34,7 @@ func New(
result := &Service{
keyStore: ks,
epochSource: es,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
localStorage: &engineLocalStorage{
engine: e,
},
@@ -53,6 +53,6 @@ func New(
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(s *Service) {
- s.log = l
+ s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))}
}
}
diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go
index 664366d1b..9669afdba 100644
--- a/pkg/services/object/get/types.go
+++ b/pkg/services/object/get/types.go
@@ -20,11 +20,11 @@ import (
)
type epochSource interface {
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
type traverserGenerator interface {
- GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
type keyStorage interface {
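Both interfaces lose their context parameter here. A stub satisfying the narrowed epochSource, in the spirit of testEpochReceiver from search_test.go further down:

package main

import "fmt"

// epochSource mirrors the narrowed interface from the diff: no context parameter.
type epochSource interface {
	Epoch() (uint64, error)
}

// fixedEpoch is a trivial stub that always reports the same epoch.
type fixedEpoch uint64

func (e fixedEpoch) Epoch() (uint64, error) { return uint64(e), nil }

func main() {
	var src epochSource = fixedEpoch(42)
	e, err := src.Epoch()
	fmt.Println(e, err)
}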
diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go
index 308ccd512..e8e82ddd9 100644
--- a/pkg/services/object/get/v2/get_range_hash.go
+++ b/pkg/services/object/get/v2/get_range_hash.go
@@ -22,7 +22,7 @@ import (
// GetRangeHash calls internal service and returns v2 response.
func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- forward, err := s.needToForwardGetRangeHashRequest(ctx, req)
+ forward, err := s.needToForwardGetRangeHashRequest(req)
if err != nil {
return nil, err
}
@@ -48,7 +48,7 @@ type getRangeForwardParams struct {
address oid.Address
}
-func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
+func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
if req.GetMetaHeader().GetTTL() <= 1 {
return getRangeForwardParams{}, nil
}
@@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *obj
}
result.address = addr
- cont, err := s.contSource.Get(ctx, addr.Container())
+ cont, err := s.contSource.Get(addr.Container())
if err != nil {
return result, fmt.Errorf("(%T) could not get container: %w", s, err)
}
- epoch, err := s.netmapSource.Epoch(ctx)
+ epoch, err := s.netmapSource.Epoch()
if err != nil {
return result, fmt.Errorf("(%T) could not get epoch: %w", s, err)
}
- nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch)
+ nm, err := s.netmapSource.GetNetMapByEpoch(epoch)
if err != nil {
return result, fmt.Errorf("(%T) could not get netmap: %w", s, err)
}
@@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *obj
builder := placement.NewNetworkMapBuilder(nm)
objectID := addr.Object()
- nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy())
+ nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy())
if err != nil {
return result, fmt.Errorf("(%T) could not build object placement: %w", s, err)
}
@@ -125,14 +125,14 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
var addrGr network.AddressGroup
if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil {
- s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
var extAddr network.AddressGroup
if len(node.ExternalAddresses()) > 0 {
if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil {
- s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
}
@@ -150,12 +150,12 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
if firstErr == nil {
firstErr = err
}
- s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode,
+ s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromNode,
zap.String("node_public_key", hex.EncodeToString(node.PublicKey())),
zap.Stringer("address", params.address),
zap.Error(err))
}
- s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
+ s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
if firstErr != nil {
return nil, firstErr
}
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index 0ec8912fd..24b2f0099 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -60,7 +60,7 @@ func NewService(svc *getsvc.Service,
netmapSource: netmapSource,
announcedKeys: announcedKeys,
contSource: contSource,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
}
for i := range opts {
@@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get V2 service"))}
}
}
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
index 0d73bcd4d..98207336c 100644
--- a/pkg/services/object/get/v2/streamer.go
+++ b/pkg/services/object/get/v2/streamer.go
@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
p.SetHeader(objV2.GetHeader())
p.SetSignature(objV2.GetSignature())
- return s.Send(newResponse(p))
+ return s.GetObjectStream.Send(newResponse(p))
}
func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
p := new(objectV2.GetObjectPartChunk)
p.SetChunk(chunk)
- return s.Send(newResponse(p))
+ return s.GetObjectStream.Send(newResponse(p))
}
func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
}
func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
- return s.Send(newRangeResponse(chunk))
+ return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
}
func newRangeResponse(p []byte) *objectV2.GetRangeResponse {
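Spelling out s.GetObjectStream.Send instead of the promoted s.Send pins the call to the embedded stream, so it keeps compiling even if the writer type itself later grows a conflicting Send method. A self-contained illustration of that embedded-method selection (types are illustrative):

package main

import "fmt"

type GetObjectStream interface {
	Send(string) error
}

type stdoutStream struct{}

func (stdoutStream) Send(msg string) error { fmt.Println(msg); return nil }

// streamObjectWriter embeds the stream; s.Send would resolve to the promoted
// method, while s.GetObjectStream.Send names the embedded field explicitly.
type streamObjectWriter struct {
	GetObjectStream
}

func (s streamObjectWriter) WriteChunk(chunk []byte) error {
	return s.GetObjectStream.Send(string(chunk))
}

func main() {
	_ = streamObjectWriter{stdoutStream{}}.WriteChunk([]byte("chunk"))
}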
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index e699a3779..bfa7fd619 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -3,7 +3,6 @@ package getsvc
import (
"context"
"crypto/sha256"
- "errors"
"hash"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -183,7 +182,9 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
default:
return nil, errUnknownChechsumType(t)
case refs.SHA256:
- p.SetHashGenerator(sha256.New)
+ p.SetHashGenerator(func() hash.Hash {
+ return sha256.New()
+ })
case refs.TillichZemor:
p.SetHashGenerator(func() hash.Hash {
return tz.New()
@@ -359,20 +360,19 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien
info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
var err error
+
+ defer func() {
+ stop = err == nil
+
+ if stop || firstErr == nil {
+ firstErr = err
+ }
+
+ // would be nice to log otherwise
+ }()
+
res, err = f(ctx, addr, c, key)
- // non-status logic error that could be returned
- // from the SDK client; should not be considered
- // as a connection error
- var siErr *objectSDK.SplitInfoError
- var eiErr *objectSDK.ECInfoError
-
- stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
-
- if stop || firstErr == nil {
- firstErr = err
- }
-
return
})
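The rewritten forwarder moves the stop/firstErr bookkeeping into a deferred closure and, in doing so, drops the old special-casing of SplitInfoError and ECInfoError as non-connection errors. A reduced sketch of the deferred-bookkeeping shape (the callback and address list are illustrative):

package main

import (
	"errors"
	"fmt"
)

// iterate mimics the forwarder loop above: a deferred closure decides whether
// to stop (first success) and records the first error seen.
func iterate(addrs []string, f func(addr string) error) error {
	var firstErr error
	for _, addr := range addrs {
		var err error
		stop := func() (stop bool) {
			defer func() {
				stop = err == nil
				if stop || firstErr == nil {
					firstErr = err
				}
			}()
			err = f(addr)
			return
		}()
		if stop {
			return nil
		}
	}
	return firstErr
}

func main() {
	err := iterate([]string{"a", "b"}, func(addr string) error {
		return errors.New("unreachable " + addr)
	})
	fmt.Println(err) // unreachable a
}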
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index 3e8832640..2c405070d 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -7,11 +7,9 @@ import (
"errors"
"fmt"
"io"
- "strconv"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -33,8 +31,6 @@ type commonPrm struct {
local bool
xHeaders []string
-
- netmapEpoch uint64
}
// SetClient sets base client for FrostFS API communication.
@@ -77,14 +73,6 @@ func (x *commonPrm) SetXHeaders(hs []string) {
x.xHeaders = hs
}
-func (x *commonPrm) calculateXHeaders() []string {
- hs := x.xHeaders
- if x.netmapEpoch != 0 {
- hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
- }
- return hs
-}
-
type readPrmCommon struct {
commonPrm
}
@@ -92,8 +80,8 @@ type readPrmCommon struct {
// SetNetmapEpoch sets the epoch number to be used to locate the object.
//
// By default current epoch on the server will be used.
-func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) {
- x.netmapEpoch = epoch
+func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
+ // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465
}
// GetObjectPrm groups parameters of GetObject operation.
@@ -151,7 +139,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Key = prm.key
@@ -245,7 +233,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error)
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
if err == nil {
@@ -338,7 +326,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Length = prm.ln
@@ -402,7 +390,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
defer span.End()
prmCli := client.PrmObjectPutInit{
- XHeaders: prm.calculateXHeaders(),
+ XHeaders: prm.xHeaders,
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -449,7 +437,7 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro
}
prmCli := client.PrmObjectPutSingle{
- XHeaders: prm.calculateXHeaders(),
+ XHeaders: prm.xHeaders,
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -508,7 +496,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
prm.cliPrm.Local = prm.local
prm.cliPrm.Session = prm.tokenSession
prm.cliPrm.BearerToken = prm.tokenBearer
- prm.cliPrm.XHeaders = prm.calculateXHeaders()
+ prm.cliPrm.XHeaders = prm.xHeaders
prm.cliPrm.Key = prm.key
rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm)
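With calculateXHeaders gone, the client layer forwards prm.xHeaders verbatim and no longer appends the netmap epoch as an extra header pair. For reference, a reconstruction of the deleted helper; the literal header key stands in for sessionAPI.XHeaderNetmapEpoch, whose exact value lives in the SDK and is an assumption here:

package main

import (
	"fmt"
	"strconv"
)

// calculateXHeaders reconstructs the helper removed above: when the epoch is
// set, it is appended to the user-supplied X-headers as one key/value pair.
func calculateXHeaders(xHeaders []string, netmapEpoch uint64) []string {
	hs := xHeaders
	if netmapEpoch != 0 {
		hs = append(hs, "__SYSTEM__NETMAP_EPOCH", strconv.FormatUint(netmapEpoch, 10))
	}
	return hs
}

func main() {
	fmt.Println(calculateXHeaders([]string{"k", "v"}, 13))
}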
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 6a6ee0f0f..377350fdd 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -4,7 +4,6 @@ import (
"context"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
@@ -35,7 +34,7 @@ type (
}
MetricRegister interface {
- AddRequestDuration(string, time.Duration, bool, string)
+ AddRequestDuration(string, time.Duration, bool)
AddPayloadSize(string, int)
}
)
@@ -52,7 +51,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
if m.enabled {
t := time.Now()
defer func() {
- m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("Get", time.Since(t), err == nil)
}()
err = m.next.Get(req, &getStreamMetric{
ServerStream: stream,
@@ -65,11 +64,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
return
}
-func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
+func (m MetricCollector) Put() (PutObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Put(ctx)
+ stream, err := m.next.Put()
if err != nil {
return nil, err
}
@@ -80,14 +79,14 @@ func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
start: t,
}, nil
}
- return m.next.Put(ctx)
+ return m.next.Put()
}
-func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
+func (m MetricCollector) Patch() (PatchObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Patch(ctx)
+ stream, err := m.next.Patch()
if err != nil {
return nil, err
}
@@ -98,7 +97,7 @@ func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
start: t,
}, nil
}
- return m.next.Patch(ctx)
+ return m.next.Patch()
}
func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
@@ -107,7 +106,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl
res, err := m.next.PutSingle(ctx, request)
- m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil)
if err == nil {
m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload()))
}
@@ -123,7 +122,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest)
res, err := m.next.Head(ctx, request)
- m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("Head", time.Since(t), err == nil)
return res, err
}
@@ -136,7 +135,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream)
err := m.next.Search(req, stream)
- m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("Search", time.Since(t), err == nil)
return err
}
@@ -149,7 +148,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque
res, err := m.next.Delete(ctx, request)
- m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil)
return res, err
}
return m.next.Delete(ctx, request)
@@ -161,7 +160,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR
err := m.next.GetRange(req, stream)
- m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil)
return err
}
@@ -174,7 +173,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa
res, err := m.next.GetRangeHash(ctx, request)
- m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil)
return res, err
}
@@ -210,7 +209,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error
func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
+ s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil)
return res, err
}
@@ -224,7 +223,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e
func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
+ s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil)
return res, err
}
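MetricRegister shrinks to a three-argument AddRequestDuration, so the IO tag never reaches the metrics backend. A toy implementation of the narrowed interface:

package main

import (
	"fmt"
	"time"
)

// MetricRegister matches the narrowed interface above: implementations only
// see the method name, duration and success flag.
type MetricRegister interface {
	AddRequestDuration(method string, d time.Duration, success bool)
	AddPayloadSize(method string, size int)
}

type logRegister struct{}

func (logRegister) AddRequestDuration(m string, d time.Duration, ok bool) {
	fmt.Printf("%s took %s (success=%v)\n", m, d, ok)
}

func (logRegister) AddPayloadSize(m string, size int) {
	fmt.Printf("%s payload: %d bytes\n", m, size)
}

func main() {
	var r MetricRegister = logRegister{}
	start := time.Now()
	r.AddRequestDuration("Get", time.Since(start), true)
	r.AddPayloadSize("PutSingle", 1024)
}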
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index 5d298bfed..953f82b48 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config,
// Patch calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
- nodeKey, err := s.KeyStorage.GetKey(nil)
+ nodeKey, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index ff13b1d3e..91b4efdc1 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- target, err := target.New(ctx, objectwriter.Params{
+ target, err := target.New(objectwriter.Params{
Config: s.Config,
Common: commonPrm,
Header: objectSDK.NewFromV2(oV2),
@@ -195,12 +195,7 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
patch.FromV2(req.GetBody())
if !s.nonFirstSend {
- err := s.patcher.ApplyHeaderPatch(ctx,
- patcher.ApplyHeaderPatchPrm{
- NewSplitHeader: patch.NewSplitHeader,
- NewAttributes: patch.NewAttributes,
- ReplaceAttributes: patch.ReplaceAttributes,
- })
+ err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes)
if err != nil {
return fmt.Errorf("patch attributes: %w", err)
}
@@ -219,9 +214,6 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
}
func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- if s.patcher == nil {
- return nil, errors.New("uninitialized patch streamer")
- }
patcherResp, err := s.patcher.Close(ctx)
if err != nil {
return nil, err
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index 7aeb5857d..8cf4f0d62 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -26,7 +27,9 @@ func NewService(ks *objutil.KeyStorage,
opts ...objectwriter.Option,
) *Service {
c := &objectwriter.Config{
- Logger: logger.NewLoggerWrapper(zap.L()),
+ RemotePool: util.NewPseudoWorkerPool(),
+ LocalPool: util.NewPseudoWorkerPool(),
+ Logger: &logger.Logger{Logger: zap.L()},
KeyStorage: ks,
ClientConstructor: cc,
MaxSizeSrc: ms,
@@ -56,8 +59,8 @@ func NewService(ks *objutil.KeyStorage,
}
}
-func (s *Service) Put() (*Streamer, error) {
+func (p *Service) Put() (*Streamer, error) {
return &Streamer{
- Config: s.Config,
+ Config: p.Config,
}, nil
}
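NewService now pre-fills RemotePool and LocalPool with util.NewPseudoWorkerPool(). A plausible sketch of such a pool, assuming it satisfies a Submit-style interface by running the task inline on the caller's goroutine (the real util implementation may differ):

package main

import "fmt"

type workerPool interface {
	Submit(func()) error
}

// pseudoWorkerPool executes submitted tasks synchronously, so callers keep
// their sequential semantics while still programming against a pool.
type pseudoWorkerPool struct{}

func (pseudoWorkerPool) Submit(task func()) error {
	task()
	return nil
}

func main() {
	var p workerPool = pseudoWorkerPool{}
	_ = p.Submit(func() { fmt.Println("executed inline") })
}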
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 90f473254..3a0b3901f 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -21,6 +21,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
@@ -28,7 +29,6 @@ import (
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
@@ -86,7 +86,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest
}
func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.validarePutSingleSize(ctx, obj); err != nil {
+ if err := s.validarePutSingleSize(obj); err != nil {
return object.ContentMeta{}, err
}
@@ -97,12 +97,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object)
return s.validatePutSingleObject(ctx, obj)
}
-func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error {
+func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error {
if uint64(len(obj.Payload())) != obj.PayloadSize() {
return target.ErrWrongPayloadSize
}
- maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
+ maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize()
if obj.PayloadSize() > maxAllowedSize {
return target.ErrExceedingMaxSize
}
@@ -153,7 +153,7 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob
func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
localOnly := req.GetMetaHeader().GetTTL() <= 1
- placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly)
+ placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly)
if err != nil {
return err
}
@@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
}
func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- iter := s.NewNodeIterator(placement.placementOptions)
+ iter := s.Config.NewNodeIterator(placement.placementOptions)
iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.KeyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
@@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
if err != nil {
return err
}
- key, err := s.KeyStorage.GetKey(nil)
+ key, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return err
}
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.KeyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
@@ -218,14 +218,14 @@ type putSinglePlacement struct {
resetSuccessAfterOnBroadcast bool
}
-func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
+func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
var result putSinglePlacement
cnrID, ok := obj.ContainerID()
if !ok {
return result, errors.New("missing container ID")
}
- cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
+ cnrInfo, err := s.Config.ContainerSource.Get(cnrID)
if err != nil {
return result, fmt.Errorf("could not get container by ID: %w", err)
}
@@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
}
result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
- latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
+ latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource)
if err != nil {
return result, fmt.Errorf("could not get latest network map: %w", err)
}
builder := placement.NewNetworkMapBuilder(latestNetmap)
if localOnly {
result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
- builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
+ builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
}
result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
return result, nil
@@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
- c, err := s.ClientConstructor.Get(info)
+ c, err := s.Config.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
@@ -283,7 +283,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
localTarget := &objectwriter.LocalTarget{
- Storage: s.LocalStore,
+ Storage: s.Config.LocalStore,
Container: container,
}
return localTarget.WriteObject(ctx, obj, meta)
@@ -317,11 +317,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
if err != nil {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
- s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
+ s.Config.Logger.Warn(logs.PutSingleRedirectFailure,
zap.Error(err),
zap.Stringer("address", addr),
zap.Stringer("object_id", objID),
zap.Stringer("container_id", cnrID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
@@ -350,12 +351,8 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
err = signature.VerifyServiceMessage(resp)
if err != nil {
err = fmt.Errorf("response verification failed: %w", err)
- return
}
- st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus())
- err = apistatus.ErrFromStatus(st)
-
return
})
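The redirect-failure log gains an explicit trace_id field via tracingPkg.GetTraceID. A plausible shape for that helper, assuming it reads the OpenTelemetry span from the context (an assumption, not the actual implementation):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// GetTraceID extracts the trace ID of the span stored in ctx so it can be
// attached to log records; without an active span it yields the all-zero ID.
func GetTraceID(ctx context.Context) string {
	return trace.SpanFromContext(ctx).SpanContext().TraceID().String()
}

func main() {
	fmt.Println(GetTraceID(context.Background())) // all zeros: no active span
}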
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 19768b7fa..f71309d31 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -36,7 +36,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
}
var err error
- p.target, err = target.New(ctx, prmTarget)
+ p.target, err = target.New(prmTarget)
if err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index f0c648187..36b514fbc 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -56,10 +56,10 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx)
+ maxSz := s.stream.MaxSizeSrc.MaxObjectSize()
s.sizes = &sizes{
- payloadSz: v.GetHeader().GetPayloadLength(),
+ payloadSz: uint64(v.GetHeader().GetPayloadLength()),
}
// check payload size limit overflow
diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go
deleted file mode 100644
index 01eb1ea8d..000000000
--- a/pkg/services/object/qos.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package object
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-)
-
-var _ ServiceServer = (*qosObjectService)(nil)
-
-type AdjustIOTag interface {
- AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
-}
-
-type qosObjectService struct {
- next ServiceServer
- adj AdjustIOTag
-}
-
-func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer {
- return &qosObjectService{
- next: next,
- adj: adjIOTag,
- }
-}
-
-func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Delete(ctx, req)
-}
-
-func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Get(req, &qosReadStream[*object.GetResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.GetRangeHash(ctx, req)
-}
-
-func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Head(ctx, req)
-}
-
-func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) {
- s, err := q.next.Patch(ctx)
- if err != nil {
- return nil, err
- }
- return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{
- s: s,
- adj: q.adj,
- }, nil
-}
-
-func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) {
- s, err := q.next.Put(ctx)
- if err != nil {
- return nil, err
- }
- return &qosWriteStream[*object.PutRequest, *object.PutResponse]{
- s: s,
- adj: q.adj,
- }, nil
-}
-
-func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.PutSingle(ctx, req)
-}
-
-func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-type qosSend[T any] interface {
- Send(T) error
-}
-
-type qosReadStream[T any] struct {
- sender qosSend[T]
- ctxF func() context.Context
-}
-
-func (g *qosReadStream[T]) Context() context.Context {
- return g.ctxF()
-}
-
-func (g *qosReadStream[T]) Send(resp T) error {
- return g.sender.Send(resp)
-}
-
-type qosVerificationHeader interface {
- GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
- Send(context.Context, TReq) error
- CloseAndRecv(context.Context) (TResp, error)
-}
-
-type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
- s qosSendRecv[TReq, TResp]
- adj AdjustIOTag
-
- ioTag string
- ioTagDefined bool
-}
-
-func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
- if q.ioTagDefined {
- ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
- }
- return q.s.CloseAndRecv(ctx)
-}
-
-func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
- if !q.ioTagDefined {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
- }
- assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
- ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
- return q.s.Send(ctx, req)
-}
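The deleted qos.go wrapped every object handler so that an IO tag derived from the request signature was pushed into the context before delegating to the next service. That decorator shape, reduced to a minimal sketch with illustrative names:

package main

import (
	"context"
	"fmt"
)

type handler func(ctx context.Context, req string) error

type ioTagKey struct{}

// withIOTag mirrors the deleted middleware: derive a tag from the request,
// inject it into the context, then delegate to the wrapped handler.
func withIOTag(next handler, tagFor func(req string) string) handler {
	return func(ctx context.Context, req string) error {
		ctx = context.WithValue(ctx, ioTagKey{}, tagFor(req))
		return next(ctx, req)
	}
}

func main() {
	h := withIOTag(func(ctx context.Context, req string) error {
		fmt.Println("tag:", ctx.Value(ioTagKey{}))
		return nil
	}, func(string) string { return "internal" })
	_ = h(context.Background(), "GET")
}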
diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go
new file mode 100644
index 000000000..95d4c9d93
--- /dev/null
+++ b/pkg/services/object/request_context.go
@@ -0,0 +1,26 @@
+package object
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+type RequestContextKeyT struct{}
+
+var RequestContextKey = RequestContextKeyT{}
+
+// RequestContext is a context passed between middleware handlers.
+type RequestContext struct {
+ Namespace string
+
+ SenderKey []byte
+
+ ContainerOwner user.ID
+
+ Role acl.Role
+
+ SoftAPECheck bool
+
+ BearerToken *bearer.Token
+}
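The new file uses the typed-context-key idiom: an empty struct key type cannot collide with values set by other packages. Typical usage looks like this (RequestContext trimmed to one field for brevity):

package main

import (
	"context"
	"fmt"
)

type RequestContextKeyT struct{}

var RequestContextKey = RequestContextKeyT{}

type RequestContext struct {
	Namespace string
}

func main() {
	ctx := context.WithValue(context.Background(), RequestContextKey, &RequestContext{Namespace: "ns"})
	// The empty-struct key makes the type assertion the only way to get the value back.
	if rc, ok := ctx.Value(RequestContextKey).(*RequestContext); ok {
		fmt.Println(rc.Namespace)
	}
}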
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index 80c971e8f..3787b4168 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -80,8 +80,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo
return r, nil
}
-func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *ResponseService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -109,8 +109,8 @@ func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchR
return r, nil
}
-func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
- stream, err := s.svc.Patch(ctx)
+func (s *ResponseService) Patch() (PatchObjectStream, error) {
+ stream, err := s.svc.Patch()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index 60d469b11..39259b0ca 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -15,12 +15,12 @@ import (
func (exec *execCtx) executeOnContainer(ctx context.Context) error {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug(ctx, logs.TryingToExecuteInContainer,
+ exec.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- if err := exec.initEpoch(ctx); err != nil {
+ if err := exec.initEpoch(); err != nil {
return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
}
@@ -44,11 +44,11 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ProcessEpoch,
+ exec.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
- traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch)
+ traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch)
if err != nil {
return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
}
@@ -59,7 +59,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
+ exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
break
}
@@ -72,8 +72,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
- zap.Error(ctx.Err()))
+ exec.log.Debug(logs.InterruptPlacementIterationByContext,
+ zap.String("error", ctx.Err().Error()))
return
default:
}
@@ -82,18 +82,18 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
- exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err))
+ exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
return
}
ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
- exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
- zap.Error(err))
+ exec.log.Debug(logs.SearchRemoteOperationFailed,
+ zap.String("error", err.Error()))
return
}
@@ -102,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
err = exec.writeIDList(ids)
mtx.Unlock()
if err != nil {
- exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err))
+ exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
return
}
}(i)
@@ -114,9 +114,9 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
return nil
}
-func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) {
+func (exec *execCtx) getContainer() (containerSDK.Container, error) {
cnrID := exec.containerID()
- cnr, err := exec.svc.containerSource.Get(ctx, cnrID)
+ cnr, err := exec.svc.containerSource.Get(cnrID)
if err != nil {
return containerSDK.Container{}, err
}
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index ced51ecce..4a2c04ecd 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -1,8 +1,6 @@
package searchsvc
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -21,13 +19,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = l.With(
+ exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "SEARCH"),
zap.Stringer("container", exec.containerID()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )
+ )}
}
func (exec *execCtx) isLocal() bool {
@@ -50,13 +48,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
return exec.prm.common.NetmapLookupDepth()
}
-func (exec *execCtx) initEpoch(ctx context.Context) error {
+func (exec *execCtx) initEpoch() error {
exec.curProcEpoch = exec.netmapEpoch()
if exec.curProcEpoch > 0 {
return nil
}
- e, err := exec.svc.currentEpochReceiver.Epoch(ctx)
+ e, err := exec.svc.currentEpochReceiver.Epoch()
if err != nil {
return err
}
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index ec65ab06a..cfaed13b8 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -11,7 +11,7 @@ import (
func (exec *execCtx) executeLocal(ctx context.Context) error {
ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
- exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err))
+ exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
return err
}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 76c091f85..4a5c414d5 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -20,26 +20,26 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ServingRequest)
+ exec.log.Debug(logs.ServingRequest)
err := exec.executeLocal(ctx)
- exec.logResult(ctx, err)
+ exec.logResult(err)
if exec.isLocal() {
- exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
+ exec.log.Debug(logs.SearchReturnResultDirectly)
return err
}
err = exec.executeOnContainer(ctx)
- exec.logResult(ctx, err)
+ exec.logResult(err)
return err
}
-func (exec *execCtx) logResult(ctx context.Context, err error) {
+func (exec *execCtx) logResult(err error) {
switch {
default:
- exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
case err == nil:
- exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
}
}
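search.go and the other search files above swap zap.Error(err) for zap.String("error", err.Error()). The two field styles compared; note that zap.Error(nil) is a no-op field while calling err.Error() on a nil error panics, so the string form relies on err being non-nil at the call site:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log := zap.NewExample()
	err := errors.New("boom")

	log.Debug("with zap.Error", zap.Error(err))                     // key "error", nil-safe
	log.Debug("with zap.String", zap.String("error", err.Error())) // same key, explicit
}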
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 918ad421f..0a40025e1 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -6,7 +6,6 @@ import (
"crypto/sha256"
"errors"
"fmt"
- "slices"
"strconv"
"testing"
@@ -59,7 +58,7 @@ type simpleIDWriter struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
+func (e testEpochReceiver) Epoch() (uint64, error) {
return uint64(e), nil
}
@@ -82,8 +81,8 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
- t, err := placement.NewTraverser(context.Background(),
+func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
+ t, err := placement.NewTraverser(
placement.ForContainer(g.c),
placement.UseBuilder(g.b[epoch]),
placement.WithoutSuccessTracking(),
@@ -91,7 +90,7 @@ func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID
return t, &containerCore.Container{Value: g.c}, err
}
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -104,7 +103,8 @@ func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, o
return nil, errors.New("vectors for address not found")
}
- res := slices.Clone(vs)
+ res := make([][]netmap.NodeInfo, len(vs))
+ copy(res, vs)
return res, nil
}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index 56fe56468..7700f78d8 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -46,11 +46,11 @@ type cfg struct {
}
traverserGenerator interface {
- GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
currentEpochReceiver interface {
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
keyStore *util.KeyStorage
@@ -69,7 +69,7 @@ func New(e *engine.StorageEngine,
opts ...Option,
) *Service {
c := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
clientConstructor: &clientConstructorWrapper{
constructor: cc,
},
@@ -94,6 +94,6 @@ func New(e *engine.StorageEngine,
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))}
}
}
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 0be5345b9..910384a0b 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -2,7 +2,6 @@ package searchsvc
import (
"context"
- "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -54,7 +53,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
}
// exclude processed address
- list = slices.Delete(list, i, i+1)
+ list = append(list[:i], list[i+1:]...)
i--
}
@@ -114,7 +113,7 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
}
func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
- cnr, err := exec.getContainer(ctx)
+ cnr, err := exec.getContainer()
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index e65293977..c570e9d8e 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -41,8 +41,8 @@ type PatchObjectStream interface {
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
- Put(context.Context) (PutObjectStream, error)
- Patch(context.Context) (PatchObjectStream, error)
+ Put() (PutObjectStream, error)
+ Patch() (PatchObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index fd8e926dd..2c5e794e9 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -96,16 +96,15 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- err = fmt.Errorf("could not close stream and receive response: %w", err)
- resp = new(object.PutResponse)
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *SignService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -133,16 +132,15 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- err = fmt.Errorf("could not close stream and receive response: %w", err)
- resp = new(object.PatchResponse)
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
- stream, err := s.svc.Patch(ctx)
+func (s *SignService) Patch() (PatchObjectStream, error) {
+ stream, err := s.svc.Patch()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index b446d3605..1438a0ea2 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -87,12 +87,12 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
})
}
-func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
- return c.next.Put(ctx)
+func (c TransportSplitter) Put() (PutObjectStream, error) {
+ return c.next.Put()
}
-func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) {
- return c.next.Patch(ctx)
+func (c TransportSplitter) Patch() (PatchObjectStream, error) {
+ return c.next.Patch()
}
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
@@ -162,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
var newResp *object.SearchResponse
- for {
+ for ln := uint64(len(ids)); ; {
if newResp == nil {
newResp = new(object.SearchResponse)
newResp.SetBody(body)
}
- cut := min(s.addrAmount, uint64(len(ids)))
+ cut := min(s.addrAmount, ln)
body.SetIDList(ids[:cut])
newResp.SetMetaHeader(resp.GetMetaHeader())
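The Send splitter cuts the ID list into responses of at most s.addrAmount entries each. The chunking logic, reduced to a standalone sketch:

package main

import "fmt"

// splitIDs breaks a list into chunks of at most addrAmount entries, as the
// search stream splitter above does per response.
func splitIDs(ids []string, addrAmount uint64) [][]string {
	var out [][]string
	for len(ids) > 0 {
		cut := min(addrAmount, uint64(len(ids)))
		out = append(out, ids[:cut])
		ids = ids[cut:]
	}
	return out
}

func main() {
	fmt.Println(splitIDs([]string{"a", "b", "c", "d", "e"}, 2))
}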
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index b10826226..92beedaa7 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,8 +1,6 @@
package util
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -10,10 +8,18 @@ import (
)
// LogServiceError writes error message of object service to provided logger.
-func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) {
- l.Error(ctx, logs.UtilObjectServiceError,
+func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
+ l.Error(logs.UtilObjectServiceError,
zap.String("node", network.StringifyGroup(node)),
zap.String("request", req),
- zap.Error(err),
+ zap.String("error", err.Error()),
+ )
+}
+
+// LogWorkerPoolError writes debug error message of object worker pool to provided logger.
+func LogWorkerPoolError(l *logger.Logger, req string, err error) {
+ l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
+ zap.String("request", req),
+ zap.String("error", err.Error()),
)
}
diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go
index f74b0aab9..1bd39f9ea 100644
--- a/pkg/services/object/util/placement.go
+++ b/pkg/services/object/util/placement.go
@@ -1,9 +1,7 @@
package util
import (
- "context"
"fmt"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -45,8 +43,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu
}
}
-func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
+func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -78,8 +76,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac
}
}
-func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
+func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -94,7 +92,7 @@ func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *o
}
if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
- vs[i] = slices.Delete(vs[i], j, j+1)
+ vs[i] = append(vs[i][:j], vs[i][j+1:]...)
j--
}
}
@@ -124,15 +122,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav
// GenerateTraverser generates placement Traverser for provided object address
// using epoch-th network map.
-func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
+func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
// get network map by epoch
- nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch)
+ nm, err := g.netMapSrc.GetNetMapByEpoch(epoch)
if err != nil {
return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
}
// get related container
- cnr, err := g.cnrSrc.Get(ctx, idCnr)
+ cnr, err := g.cnrSrc.Get(idCnr)
if err != nil {
return nil, nil, fmt.Errorf("could not get container: %w", err)
}
@@ -162,7 +160,7 @@ func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID
)
}
- t, err := placement.NewTraverser(ctx, traverseOpts...)
+ t, err := placement.NewTraverser(traverseOpts...)
if err != nil {
return nil, nil, err
}
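remotePlacement falls back to the classic append-based in-place removal instead of slices.Delete; the j-- afterwards keeps the element shifted into position j from being skipped. The idiom in isolation:

package main

import "fmt"

// removeAt is equivalent to slices.Delete(s, i, i+1): shift the tail left
// over index i and shrink the slice by one.
func removeAt[T any](s []T, i int) []T {
	return append(s[:i], s[i+1:]...)
}

func main() {
	keys := []string{"local", "remote-1", "remote-2"}
	for j := 0; j < len(keys); j++ {
		if keys[j] == "local" {
			keys = removeAt(keys, j)
			j-- // re-check the element now occupying position j
		}
	}
	fmt.Println(keys) // [remote-1 remote-2]
}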
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
index 7242970b5..a890d5357 100644
--- a/pkg/services/object_manager/placement/cache_test.go
+++ b/pkg/services/object_manager/placement/cache_test.go
@@ -85,10 +85,7 @@ func TestContainerNodesCache(t *testing.T) {
})
t.Run("the error is propagated", func(t *testing.T) {
var pp netmapSDK.PlacementPolicy
- r := netmapSDK.ReplicaDescriptor{}
- r.SetNumberOfObjects(1)
- r.SetSelectorName("Missing")
- pp.AddReplicas(r)
+ require.NoError(t, pp.DecodeString("REP 1 SELECT 1 FROM X FILTER ATTR EQ 42 AS X"))
c := placement.NewContainerNodesCache(size)
_, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go
index 0f24a9d96..45e6df339 100644
--- a/pkg/services/object_manager/placement/metrics.go
+++ b/pkg/services/object_manager/placement/metrics.go
@@ -2,90 +2,24 @@ package placement
import (
"errors"
- "fmt"
- "maps"
- "math"
"strings"
- "sync"
- "sync/atomic"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
const (
attrPrefix = "$attribute:"
-
- geoDistance = "$geoDistance"
)
type Metric interface {
CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int
}
-type metricsParser struct {
- locodeDBPath string
- locodes map[string]locodedb.Point
-}
-
-type MetricParser interface {
- ParseMetrics([]string) ([]Metric, error)
-}
-
-func NewMetricsParser(locodeDBPath string) (MetricParser, error) {
- return &metricsParser{
- locodeDBPath: locodeDBPath,
- }, nil
-}
-
-func (p *metricsParser) initLocodes() error {
- if len(p.locodes) != 0 {
- return nil
+func ParseMetric(raw string) (Metric, error) {
+ if attr, found := strings.CutPrefix(raw, attrPrefix); found {
+ return NewAttributeMetric(attr), nil
}
- if len(p.locodeDBPath) > 0 {
- p.locodes = make(map[string]locodedb.Point)
- locodeDB := locodebolt.New(locodebolt.Prm{
- Path: p.locodeDBPath,
- },
- locodebolt.ReadOnly(),
- )
- err := locodeDB.Open()
- if err != nil {
- return err
- }
- defer locodeDB.Close()
- err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) {
- p.locodes[k] = v
- })
- if err != nil {
- return err
- }
- return nil
- }
- return errors.New("set path to locode database")
-}
-
-func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) {
- var metrics []Metric
- for _, raw := range priority {
- if attr, found := strings.CutPrefix(raw, attrPrefix); found {
- metrics = append(metrics, NewAttributeMetric(attr))
- } else if raw == geoDistance {
- err := p.initLocodes()
- if err != nil {
- return nil, err
- }
- if len(p.locodes) == 0 {
- return nil, fmt.Errorf("provide locodes database for metric %s", raw)
- }
- m := NewGeoDistanceMetric(p.locodes)
- metrics = append(metrics, m)
- } else {
- return nil, fmt.Errorf("unsupported priority metric %s", raw)
- }
- }
- return metrics, nil
+ return nil, errors.New("unsupported priority metric")
}
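+
+// Illustrative usage (not part of this change): after this revert only
+// "$attribute:<name>" metrics are recognized; everything else is rejected.
+//
+//	m, err := ParseMetric("$attribute:ClusterName")
+//	// err == nil; m compares nodes by their ClusterName attribute value
+//	_, err = ParseMetric("$geoDistance")
+//	// err != nil: unsupported priority metric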
// attributeMetric describes priority metric based on attribute.
@@ -107,79 +41,3 @@ func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.Node
func NewAttributeMetric(attr string) Metric {
return &attributeMetric{attribute: attr}
}
-
-// geoDistanceMetric describes priority metric based on geographic distance.
-type geoDistanceMetric struct {
- locodes map[string]locodedb.Point
- distance *atomic.Pointer[map[string]int]
- mtx sync.Mutex
-}
-
-func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
- d := atomic.Pointer[map[string]int]{}
- m := make(map[string]int)
- d.Store(&m)
- gm := &geoDistanceMetric{
- locodes: locodes,
- distance: &d,
- }
- return gm
-}
-
-// CalculateValue returns the distance in kilometers between the current
-// node and the provided one if coordinates are found for both nodes.
-// Otherwise it returns math.MaxInt.
-func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
- fl := from.LOCODE()
- tl := to.LOCODE()
- if fl == tl {
- return 0
- }
- m := gm.distance.Load()
- if v, ok := (*m)[fl+tl]; ok {
- return v
- }
- return gm.calculateDistance(fl, tl)
-}
-
-func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
- gm.mtx.Lock()
- defer gm.mtx.Unlock()
- od := gm.distance.Load()
- if v, ok := (*od)[from+to]; ok {
- return v
- }
- nd := maps.Clone(*od)
- var dist int
- pointFrom, okFrom := gm.locodes[from]
- pointTo, okTo := gm.locodes[to]
- if okFrom && okTo {
- dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
- } else {
- dist = math.MaxInt
- }
- nd[from+to] = dist
- gm.distance.Store(&nd)
-
- return dist
-}
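-
-// Note (illustrative): calculateDistance implements copy-on-write caching.
-// Readers load the map through the atomic.Pointer without locking, while
-// writers serialize on mtx, re-check the cache, clone the map, add the new
-// entry, and publish the clone atomically, so readers never observe a
-// partially updated map.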
-
-// distance returns the distance in kilometers between two points.
-// Parameters are the latitude and longitude of point 1 and point 2 in decimal degrees.
-// The original implementation can be found at https://www.geodatasource.com/developers/go.
-func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
- radLat1 := math.Pi * lt1 / 180
- radLat2 := math.Pi * lt2 / 180
- radTheta := math.Pi * (ln1 - ln2) / 180
-
- dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
-
- if dist > 1 {
- dist = 1
- }
-
- dist = math.Acos(dist)
- dist = dist * 180 / math.Pi
- dist = dist * 60 * 1.1515 * 1.609344
-
- return dist
-}
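-
-// Worked example (illustrative): for Moscow (55.7558, 37.6173) and
-// Saint Petersburg (59.9343, 30.3351) the formula above gives a central
-// angle of about 5.7 degrees, i.e. roughly 633 km, which matches the
-// actual great-circle distance between the two cities.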
diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go
index b3f8d9c03..1782e27ea 100644
--- a/pkg/services/object_manager/placement/netmap.go
+++ b/pkg/services/object_manager/placement/netmap.go
@@ -1,7 +1,6 @@
package placement
import (
- "context"
"crypto/sha256"
"fmt"
@@ -36,12 +35,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
}
}
-func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) {
+func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) {
return s.nm, nil
}
-func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc)
+func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
if err != nil {
return nil, fmt.Errorf("could not get network map: %w", err)
}
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index a3f9af959..6440f187d 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -1,7 +1,6 @@
package placement
import (
- "context"
"errors"
"fmt"
"slices"
@@ -22,7 +21,7 @@ type Builder interface {
//
// Must return all container nodes if object identifier
// is nil.
- BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
+ BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
}
type NodeState interface {
@@ -79,7 +78,7 @@ func defaultCfg() *cfg {
}
// NewTraverser creates, initializes with options and returns Traverser instance.
-func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
+func NewTraverser(opts ...Option) (*Traverser, error) {
cfg := defaultCfg()
for i := range opts {
@@ -99,7 +98,7 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy)
}
- ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy)
+ ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy)
if err != nil {
return nil, fmt.Errorf("could not build placement: %w", err)
}
@@ -115,13 +114,15 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
var unsortedVector []netmap.NodeInfo
var regularVector []netmap.NodeInfo
for i := range rem {
- pivot := min(len(ns[i]), rem[i])
- unsortedVector = append(unsortedVector, ns[i][:pivot]...)
- regularVector = append(regularVector, ns[i][pivot:]...)
+ unsortedVector = append(unsortedVector, ns[i][:rem[i]]...)
+ regularVector = append(regularVector, ns[i][rem[i]:]...)
}
rem = []int{-1, -1}
- sortedVector := sortVector(cfg, unsortedVector)
+ sortedVector, err := sortVector(cfg, unsortedVector)
+ if err != nil {
+ return nil, err
+ }
ns = [][]netmap.NodeInfo{sortedVector, regularVector}
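// ns is reduced to two vectors: the metric-sorted prefix (the first
// rem[i] nodes of every original vector) and the untouched remainder.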
} else if cfg.flatSuccess != nil {
ns = flatNodes(ns)
@@ -186,7 +187,7 @@ type nodeMetrics struct {
metrics []int
}
-func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo {
+func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, error) {
nm := make([]nodeMetrics, len(unsortedVector))
node := cfg.nodeState.LocalNodeInfo()
@@ -200,14 +201,14 @@ func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo {
metrics: m,
}
}
- slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
+ slices.SortFunc(nm, func(a, b nodeMetrics) int {
return slices.Compare(a.metrics, b.metrics)
})
sortedVector := make([]netmap.NodeInfo, len(unsortedVector))
for i := range unsortedVector {
sortedVector[i] = unsortedVector[nm[i].index]
}
- return sortedVector
+ return sortedVector, nil
}
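
// Illustrative note (assumption about intent): metric vectors are compared
// lexicographically, so the first metric dominates and later ones only
// break ties. With a single attribute metric, nodes sharing the local
// node's attribute value (metric 0) sort before the rest (metric 1):
//
//	slices.Compare([]int{0}, []int{1}) // -1, so the matching node goes first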
// Node is a descriptor of storage node with information required for intra-container communication.
@@ -288,8 +289,8 @@ func (t *Traverser) Next() []Node {
func (t *Traverser) skipEmptyVectors() {
for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
- t.vectors = slices.Delete(t.vectors, i, i+1)
- t.rem = slices.Delete(t.rem, i, i+1)
+ t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
+ t.rem = append(t.rem[:i], t.rem[i+1:]...)
i--
} else {
break
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index d1370f21e..38f62aa07 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -1,8 +1,6 @@
package placement
import (
- "context"
- "slices"
"strconv"
"testing"
@@ -19,7 +17,7 @@ type testBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return b.vectors, nil
}
@@ -35,7 +33,8 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
vc := make([][]netmap.NodeInfo, 0, len(v))
for i := range v {
- ns := slices.Clone(v[i])
+ ns := make([]netmap.NodeInfo, len(v[i]))
+ copy(ns, v[i])
vc = append(vc, ns)
}
@@ -103,7 +102,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithoutSuccessTracking(),
@@ -132,7 +131,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -161,7 +160,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
)
@@ -202,7 +201,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodes, cnr := testPlacement(selectors, replicas)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local)
@@ -277,7 +276,7 @@ func TestTraverserRemValues(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithCopyNumbers(testCase.copyNumbers),
@@ -323,7 +322,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
m := []Metric{NewAttributeMetric("ClusterName")}
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -357,52 +356,6 @@ func TestTraverserPriorityMetrics(t *testing.T) {
require.Nil(t, next)
})
- t.Run("one rep one metric fewer nodes", func(t *testing.T) {
- selectors := []int{2}
- replicas := []int{3}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "B")
-
- sdkNode := testNode(5)
- sdkNode.SetAttribute("ClusterName", "B")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 B} ]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_1 B}, {Node_0 A} ]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
t.Run("two reps two metrics", func(t *testing.T) {
selectors := []int{3, 3}
replicas := []int{2, 2}
@@ -446,7 +399,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
NewAttributeMetric("UN-LOCODE"),
}
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -484,7 +437,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
nodesCopy = copyVectors(nodes)
- tr, err = NewTraverser(context.Background(),
+ tr, err = NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -517,7 +470,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
nodesCopy = copyVectors(nodes)
- tr, err = NewTraverser(context.Background(),
+ tr, err = NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -568,7 +521,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
m := []Metric{NewAttributeMetric("ClusterName")}
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -601,53 +554,4 @@ func TestTraverserPriorityMetrics(t *testing.T) {
next = tr.Next()
require.Nil(t, next)
})
-
- t.Run("one rep one geo metric", func(t *testing.T) {
- t.Skip()
- selectors := []int{2}
- replicas := []int{2}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
-
- sdkNode := testNode(2)
- sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
-
- nodesCopy := copyVectors(nodes)
-
- parser, err := NewMetricsParser("/path/to/locode_db")
- require.NoError(t, err)
- m, err := parser.ParseMetrics([]string{geoDistance})
- require.NoError(t, err)
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `$geoDistance` the order will be:
- // [ {Node_0 RU MOW}, {Node_1 RU LED}]
- // With priority metric `$geoDistance` the order should be:
- // [ {Node_1 RU LED}, {Node_0 RU MOW}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
}
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index e5f001d5a..7476dbd48 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -57,12 +57,14 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
ts, err := g.tsSource.Tombstone(ctx, a, epoch)
if err != nil {
- log.Warn(ctx,
+ log.Warn(
logs.TombstoneCouldNotGetTheTombstoneTheSource,
zap.Error(err),
)
- } else if ts != nil {
- return g.handleTS(ctx, addrStr, ts, epoch)
+ } else {
+ if ts != nil {
+ return g.handleTS(addrStr, ts, epoch)
+ }
}
// requested tombstone not
@@ -70,12 +72,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
return false
}
-func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
+func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
if atr.Key() == objectV2.SysAttributeExpEpoch {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
- g.log.Warn(ctx,
+ g.log.Warn(
logs.TombstoneExpirationParseFailure,
zap.Error(err),
)
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 2147a32fe..9d33e8179 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -3,7 +3,6 @@ package tombstone
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
@@ -24,7 +23,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- log: logger.NewLoggerWrapper(zap.NewNop()),
+ log: &logger.Logger{Logger: zap.NewNop()},
cacheSize: defaultLRUCacheSize,
}
}
@@ -50,7 +49,9 @@ func NewChecker(oo ...Option) *ExpirationChecker {
panicOnNil(cfg.tsSource, "Tombstone source")
cache, err := lru.New[string, uint64](cfg.cacheSize)
- assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize))
+ if err != nil {
+ panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
+ }
return &ExpirationChecker{
cache: cache,
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 975941847..1ff07b05a 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -39,7 +38,9 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
// Panics if any of the provided options does not allow
// constructing a valid tombstone local Source.
func NewSource(p TombstoneSourcePrm) Source {
- assert.False(p.s == nil, "Tombstone source: nil object service")
+ if p.s == nil {
+ panic("Tombstone source: nil object service")
+ }
return Source(p)
}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index dcaaec0b4..dbc9ea53c 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -28,10 +28,10 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
))
defer span.End()
- cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container())
+ cnr, err := p.cnrSrc.Get(objInfo.Address.Container())
if err != nil {
if client.IsErrContainerNotFound(err) {
- existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container())
+ existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container())
if errWasRemoved != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved)
} else if existed {
@@ -56,7 +56,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
idObj := objInfo.Address.Object()
idCnr := objInfo.Address.Container()
- nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy)
+ nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -86,7 +86,7 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc
}
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -110,7 +110,6 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
// Number of copies that are stored on maintenance nodes.
var uncheckedCopies int
- var candidates []netmap.NodeInfo
for i := 0; shortage > 0 && i < len(nodes); i++ {
select {
case <-ctx.Done():
@@ -118,68 +117,71 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
default:
}
- var err error
- st := checkedNodes.processStatus(nodes[i])
- if !st.Processed() {
- st, err = p.checkStatus(ctx, addr, nodes[i])
- checkedNodes.set(nodes[i], st)
- if st == nodeDoesNotHoldObject {
- // 1. This is the first time the node is encountered (`!st.Processed()`).
- // 2. The node does not hold object (`st == nodeDoesNotHoldObject`).
- // So we need to try to put an object to it.
- candidates = append(candidates, nodes[i])
- continue
- }
- }
-
- switch st {
- case nodeIsLocal:
+ if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
requirements.needLocalCopy = true
shortage--
- case nodeIsUnderMaintenance:
- shortage--
- uncheckedCopies++
+ } else if nodes[i].Status().IsMaintenance() {
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+ } else {
+ if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
+ if status == nodeHoldsObject {
+ // node already contains a replica, no need to replicate
+ nodes = append(nodes[:i], nodes[i+1:]...)
+ i--
+ shortage--
+ }
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK,
- zap.String("node", netmap.StringifyPublicKey(nodes[i])))
- case nodeHoldsObject:
- shortage--
- case nodeDoesNotHoldObject:
- case nodeStatusUnknown:
- p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
- zap.Stringer("object", addr),
- zap.Error(err))
- default:
- panic("unreachable")
+ continue
+ }
+
+ callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
+
+ _, err := p.remoteHeader(callCtx, nodes[i], addr, false)
+
+ cancel()
+
+ if err == nil {
+ shortage--
+ checkedNodes.submitReplicaHolder(nodes[i])
+ } else {
+ if client.IsErrObjectNotFound(err) {
+ checkedNodes.submitReplicaCandidate(nodes[i])
+ continue
+ } else if client.IsErrNodeUnderMaintenance(err) {
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+ } else {
+ p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
+ zap.Stringer("object", addr),
+ zap.String("error", err.Error()),
+ )
+ }
+ }
}
+
+ nodes = append(nodes[:i], nodes[i+1:]...)
+ i--
}
- p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies)
+ p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
}
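+
+// Note on the reverted flow (illustrative): nodes recorded as replica
+// candidates (head returned "object not found", now or on an earlier pass)
+// are kept in the slice by the continue above, while every other node is
+// spliced out, so handleProcessNodesResult receives in nodes exactly the
+// candidates for new replicas.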
-func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) {
- if p.netmapKeys.IsLocalKey(node.PublicKey()) {
- return nodeIsLocal, nil
- }
- if node.Status().IsMaintenance() {
- return nodeIsUnderMaintenance, nil
- }
+// handleMaintenance handles a node in maintenance mode and returns the new
+// shortage and uncheckedCopies values.
+//
+// Remote nodes under maintenance are considered OK: such nodes MAY not
+// respond with the object, but counting them as holders prevents spam with
+// new replicas. Additional copies should not be removed in this case,
+// however, because that could remove the only remaining copy.
+func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+ checkedNodes.submitReplicaHolder(node)
+ shortage--
+ uncheckedCopies++
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
- _, err := p.remoteHeader(callCtx, node, addr, false)
- cancel()
-
- if err == nil {
- return nodeHoldsObject, nil
- }
- if client.IsErrObjectNotFound(err) {
- return nodeDoesNotHoldObject, nil
- }
- if client.IsErrNodeUnderMaintenance(err) {
- return nodeIsUnderMaintenance, nil
- }
- return nodeStatusUnknown, err
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+ zap.String("node", netmap.StringifyPublicKey(node)),
+ )
+ return shortage, uncheckedCopies
}
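+
+// Illustrative trace (not part of this change): with shortage=2 and one
+// replica on a maintenance node, handleMaintenance returns shortage=1 and
+// uncheckedCopies=1. The copy counts toward the policy, and a non-zero
+// uncheckedCopies later prevents removal of the redundant local copy.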
func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
@@ -187,7 +189,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
) {
switch {
case shortage > 0:
- p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected,
+ p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
zap.Stringer("object", addr),
zap.Uint32("shortage", shortage),
)
@@ -203,7 +205,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
case uncheckedCopies > 0:
// If we have more copies than needed, but some of them are from the maintenance nodes,
// save the local copy.
- p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
+ p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
zap.Int("count", uncheckedCopies))
case uncheckedCopies == 0:
diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go
index 69879c439..d4c7ccbf9 100644
--- a/pkg/services/policer/check_test.go
+++ b/pkg/services/policer/check_test.go
@@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) {
cache.SubmitSuccessfulReplication(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
- cache.set(node, nodeDoesNotHoldObject)
+ cache.submitReplicaCandidate(node)
require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject)
- cache.set(node, nodeHoldsObject)
+ cache.submitReplicaHolder(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index fbdeb3148..6d2c153c9 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -39,7 +39,7 @@ func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectco
// All of them must be stored on all of the container nodes.
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
objID := objInfo.Address.Object()
- nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy)
+ nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -59,7 +59,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -69,7 +69,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
}
func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
- nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
+ nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -91,7 +91,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
if res.removeLocal {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
p.cbRedundantCopy(ctx, objInfo.Address)
}
return nil
@@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
var removeLocalChunk bool
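// EC chunk with index i is expected on container node i modulo the vector
// length, so every chunk has exactly one required holder.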
requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
- if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
// current node is required node, we are happy
return ecChunkProcessResult{
validPlacement: true,
@@ -109,7 +109,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
}
if requiredNode.Status().IsMaintenance() {
// consider a node under maintenance as holding the object, but do not drop the local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
return ecChunkProcessResult{}
}
@@ -120,7 +120,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
if err == nil {
removeLocalChunk = true
} else if client.IsErrObjectNotFound(err) {
- p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
+ p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
task := replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -129,9 +129,9 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
} else if client.IsErrNodeUnderMaintenance(err) {
// consider a node under maintenance as holding the object, but do not drop the local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
} else {
- p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
+ p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
}
return ecChunkProcessResult{
@@ -146,13 +146,13 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I
requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
if len(requiredChunkIndexes) == 0 {
- p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
+ p.log.Info(logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
return true
}
err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if len(requiredChunkIndexes) == 0 {
@@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec
if uint32(i) == objInfo.ECInfo.Total {
break
}
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
}
}
@@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad
func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
var eiErr *objectSDK.ECInfoError
for _, n := range nodes {
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
continue
}
_, err := p.remoteHeader(ctx, n, parentAddress, true)
@@ -224,11 +224,11 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
var chunkID oid.ID
if err := chunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
+ p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return false
}
@@ -239,7 +239,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
for index, candidates := range required {
if len(candidates) == 0 {
- p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
+ p.log.Error(logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
return false
}
}
@@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
return
}
var err error
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
_, err = p.localHeader(ctx, parentAddress)
} else {
_, err = p.remoteHeader(ctx, n, parentAddress, true)
@@ -271,20 +271,18 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
resolved[ch.Index] = append(resolved[ch.Index], n)
var ecInfoChunkID oid.ID
if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return
}
if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
+ p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return
}
chunkIDs[ch.Index] = ecInfoChunkID
}
- } else if client.IsErrObjectAlreadyRemoved(err) {
- restore = false
- } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
- p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
+ } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+ p.log.Warn(logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
p.replicator.HandleReplicationTask(ctx, replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -301,7 +299,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
for i := range resolved {
found = append(found, i)
}
- p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
+ p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
return
}
p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
@@ -312,7 +310,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
) {
c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
@@ -321,7 +319,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
key, err := p.keyStorage.GetKey(nil)
if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
required := make([]bool, len(parts))
@@ -331,7 +329,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
}
if err := c.ReconstructParts(parts, required, key); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
for idx, part := range parts {
@@ -343,7 +341,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
pID, _ := part.ID()
addr.SetObject(pID)
targetNode := nodes[idx%len(nodes)]
- if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
p.replicator.HandleLocalPutTask(ctx, replicator.Task{
Addr: addr,
Obj: part,
@@ -371,7 +369,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
var obj *objectSDK.Object
var err error
for _, node := range nodes {
- if p.netmapKeys.IsLocalKey(node.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
obj, err = p.localObject(egCtx, objID)
} else {
obj, err = p.remoteObject(egCtx, node, objID)
@@ -379,7 +377,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
if err == nil {
break
}
- p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
+ p.log.Warn(logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
}
if obj != nil {
parts[idx] = obj
@@ -388,7 +386,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
})
}
if err := errGroup.Wait(); err != nil {
- p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
return nil
}
return parts
diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go
index c6980536b..e230153f9 100644
--- a/pkg/services/policer/ec_test.go
+++ b/pkg/services/policer/ec_test.go
@@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
@@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go
index c2157de5d..cd47cb0fc 100644
--- a/pkg/services/policer/nodecache.go
+++ b/pkg/services/policer/nodecache.go
@@ -8,9 +8,6 @@ const (
nodeNotProcessed nodeProcessStatus = iota
nodeDoesNotHoldObject
nodeHoldsObject
- nodeStatusUnknown
- nodeIsUnderMaintenance
- nodeIsLocal
)
func (st nodeProcessStatus) Processed() bool {
@@ -18,19 +15,37 @@ func (st nodeProcessStatus) Processed() bool {
}
// nodeCache tracks Policer's check progress.
-type nodeCache map[uint64]nodeProcessStatus
+type nodeCache map[uint64]bool
func newNodeCache() nodeCache {
- return make(map[uint64]nodeProcessStatus)
+ return make(map[uint64]bool)
}
-func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
+func (n nodeCache) set(node netmap.NodeInfo, val bool) {
n[node.Hash()] = val
}
+// submitReplicaCandidate marks the storage node as a candidate to store the
+// object replica in case of shortage.
+func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
+ n.set(node, false)
+}
+
+// submits storage node as a current object replica holder.
+func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
+ n.set(node, true)
+}
+
// processStatus returns current processing status of the storage node.
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
- return n[node.Hash()]
+ switch val, ok := n[node.Hash()]; {
+ case !ok:
+ return nodeNotProcessed
+ case val:
+ return nodeHoldsObject
+ default:
+ return nodeDoesNotHoldObject
+ }
}
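+
+// Illustrative usage (not part of this change): absence from the map means
+// "not processed yet"; the boolean distinguishes the two processed states.
+//
+//	c := newNodeCache()
+//	c.processStatus(node)          // nodeNotProcessed
+//	c.submitReplicaCandidate(node) // sets false
+//	c.processStatus(node)          // nodeDoesNotHoldObject
+//	c.submitReplicaHolder(node)    // sets true
+//	c.processStatus(node)          // nodeHoldsObject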
// SubmitSuccessfulReplication marks given storage node as a current object
@@ -38,5 +53,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
//
// SubmitSuccessfulReplication implements replicator.TaskResult.
func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
- n.set(node, nodeHoldsObject)
+ n.submitReplicaHolder(node)
}
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 5d59604c2..336f7a0ab 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -91,7 +91,7 @@ type cfg struct {
func defaultCfg() *cfg {
return &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
batchSize: 10,
cacheSize: 1024, // 1024 * address size = 1024 * 64 B = 64 KiB
sleepDuration: 1 * time.Second,
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index c91e7cc7c..363c0b922 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,13 +1,13 @@
package policer
import (
- "fmt"
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
+ "go.uber.org/zap"
)
type objectsInWork struct {
@@ -55,8 +55,12 @@ func New(opts ...Option) *Policer {
opts[i](c)
}
+ c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
+
cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
- assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize))
+ if err != nil {
+ panic(err)
+ }
return &Policer{
cfg: c,
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index 049c33753..4e17e98a8 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"errors"
- "slices"
"sort"
"testing"
"time"
@@ -37,10 +36,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
// Container source and bury function
buryCh := make(chan oid.Address)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -79,7 +78,6 @@ func TestProcessObject(t *testing.T) {
maintenanceNodes []int
wantRemoveRedundant bool
wantReplicateTo []int
- headResult map[int]error
ecInfo *objectcore.ECInfo
}{
{
@@ -129,7 +127,7 @@ func TestProcessObject(t *testing.T) {
nodeCount: 2,
policy: `REP 2 REP 2`,
placement: [][]int{{0, 1}, {0, 1}},
- wantReplicateTo: []int{1},
+ wantReplicateTo: []int{1, 1}, // is this actually good?
},
{
desc: "lock object must be replicated to all nodes",
@@ -147,14 +145,6 @@ func TestProcessObject(t *testing.T) {
objHolders: []int{1},
maintenanceNodes: []int{2},
},
- {
- desc: "preserve local copy when node response with MAINTENANCE",
- nodeCount: 3,
- policy: `REP 2`,
- placement: [][]int{{1, 2}},
- objHolders: []int{1},
- headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
- },
{
desc: "lock object must be replicated to all EC nodes",
objType: objectSDK.TypeLock,
@@ -171,14 +161,6 @@ func TestProcessObject(t *testing.T) {
placement: [][]int{{0, 1, 2}},
wantReplicateTo: []int{1, 2},
},
- {
- desc: "do not remove local copy when MAINTENANCE status is cached",
- objType: objectSDK.TypeRegular,
- nodeCount: 3,
- policy: `REP 1 REP 1`,
- placement: [][]int{{1, 2}, {1, 0}},
- headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
- },
}
for i := range tests {
@@ -222,14 +204,11 @@ func TestProcessObject(t *testing.T) {
t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
return nil, errors.New("unexpected object head")
}
- if ti.headResult != nil {
- if err, ok := ti.headResult[index]; ok {
- return nil, err
+ for _, i := range ti.objHolders {
+ if index == i {
+ return nil, nil
}
}
- if slices.Contains(ti.objHolders, index) {
- return nil, nil
- }
return nil, new(apistatus.ObjectNotFound)
}
@@ -238,14 +217,14 @@ func TestProcessObject(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(addr.Container()) {
return cnr, nil
}
t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container())
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -303,10 +282,10 @@ func TestProcessObjectError(t *testing.T) {
cnr := &container.Container{}
cnr.Value.Init()
source := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return nil, new(apistatus.ContainerNotFound)
},
}
@@ -351,10 +330,10 @@ func TestIteratorContract(t *testing.T) {
}
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -443,22 +422,18 @@ func (it *sliceKeySpaceIterator) Rewind() {
}
type containerSrc struct {
- get func(ctx context.Context, id cid.ID) (*container.Container, error)
- deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error)
+ get func(id cid.ID) (*container.Container, error)
+ deletionInfo func(id cid.ID) (*container.DelInfo, error)
}
-func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
- return f.get(ctx, id)
-}
+func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) }
-func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
- return f.deletionInfo(ctx, id)
-}
+func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) }
// placementBuilderFunc is a placement.Builder backed by a function
type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return f(c, o, p)
}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index 635a5683b..a5ebb0010 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -7,20 +7,17 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (p *Policer) Run(ctx context.Context) {
p.shardPolicyWorker(ctx)
- p.log.Info(ctx, logs.PolicerRoutineStopped)
+ p.log.Info(logs.PolicerRoutineStopped)
}
func (p *Policer) shardPolicyWorker(ctx context.Context) {
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
for {
select {
case <-ctx.Done():
@@ -36,7 +33,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
continue
}
- p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
+ p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
}
skipMap := newSkipMap()
@@ -62,9 +59,9 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
if p.objsInWork.add(addr.Address) {
err := p.processObject(ctx, addr)
if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
- p.log.Error(ctx, logs.PolicerUnableToProcessObj,
+ p.log.Error(logs.PolicerUnableToProcessObj,
zap.Stringer("object", addr.Address),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
p.cache.Add(addr.Address, time.Now())
p.objsInWork.remove(addr.Address)
@@ -72,7 +69,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
}
})
if err != nil {
- p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
+ p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
}
}
}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 8c6f0df06..7e5c6e093 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
@@ -26,7 +27,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork,
+ p.log.Debug(logs.ReplicatorFinishWork,
zap.Uint32("amount of unfinished replicas", task.NumCopies),
)
}()
@@ -42,9 +43,10 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
var err error
task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err))
+ zap.Error(err),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
@@ -63,6 +65,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
log := p.log.With(
zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])),
zap.Stringer("object", task.Addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)
@@ -72,11 +75,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
cancel()
if err != nil {
- log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
- zap.Error(err),
+ log.Error(logs.ReplicatorCouldNotReplicateObject,
+ zap.String("error", err.Error()),
)
} else {
- log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
+ log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
task.NumCopies--
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index 216fe4919..7e7090237 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -3,12 +3,12 @@ package replicator
import (
"context"
"errors"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -22,7 +22,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask",
@@ -43,24 +43,31 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
if err == nil {
break
}
- endpoints := slices.Collect(node.NetworkEndpoints())
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ var endpoints []string
+ node.IterateNetworkEndpoints(func(s string) bool {
+ endpoints = append(endpoints, s)
+ return false
+ })
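+ // IterateNetworkEndpoints stops as soon as the callback returns true,
+ // so returning false above collects every endpoint of the node.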
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
zap.Error(err),
- zap.Strings("endpoints", endpoints))
+ zap.Strings("endpoints", endpoints),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if obj == nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errFailedToGetObjectFromAnyNode))
+ zap.Error(errFailedToGetObjectFromAnyNode),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err))
+ zap.Error(err),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index bcad8471d..537833516 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -19,7 +20,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask",
@@ -30,16 +31,18 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
defer span.End()
if task.Obj == nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errObjectNotDefined))
+ zap.Error(errObjectNotDefined),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err))
+ zap.Error(err),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index a940cef37..f2f86daf0 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -7,6 +7,7 @@ import (
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
// Replicator represents the utility that replicates
@@ -44,6 +45,8 @@ func New(opts ...Option) *Replicator {
opts[i](c)
}
+ c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))}
+
return &Replicator{
cfg: c,
}
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index f0591de71..e914119b4 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -33,7 +33,10 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create"))
+ s.log.Debug(logs.ServingRequest,
+ zap.String("component", "SessionService"),
+ zap.String("request", "Create"),
+ )
respBody, err := s.exec.Create(ctx, req.GetBody())
if err != nil {
diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go
index 60db97f90..411734ea1 100644
--- a/pkg/services/session/storage/persistent/options.go
+++ b/pkg/services/session/storage/persistent/options.go
@@ -19,7 +19,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- l: logger.NewLoggerWrapper(zap.L()),
+ l: &logger.Logger{Logger: zap.L()},
timeout: 100 * time.Millisecond,
}
}
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index 132d62445..71711e371 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -1,7 +1,6 @@
package persistent
import (
- "context"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
@@ -64,7 +63,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
// enable encryption if it
// was so configured
if cfg.privateKey != nil {
- rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
+ rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
cfg.privateKey.D.FillBytes(rawKey)
c, err := aes.NewCipher(rawKey)
@@ -106,7 +105,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
return err
})
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage,
+ s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
zap.Error(err),
zap.Stringer("ownerID", ownerID),
zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -131,7 +130,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
if epochFromToken(v) <= epoch {
err = c.Delete()
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken,
+ s.l.Error(logs.PersistentCouldNotDeleteSToken,
zap.String("token_id", hex.EncodeToString(k)),
)
}
@@ -142,7 +141,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
})
})
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens,
+ s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
zap.Uint64("epoch", epoch),
)
}
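
Reviewer note: the rawKey sizing fixed above, (N.BitLen()+7)/8 over the curve order, yields a constant-length key regardless of leading zero bytes in the scalar, because D.FillBytes left-pads. A standalone sketch under the assumption of a P-256 key, where the length comes out to 32 bytes and therefore selects AES-256:

package main

import (
	"crypto/aes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	// The curve order fixes the key length; FillBytes left-pads with zeros,
	// so short scalars still yield exactly 32 bytes on P-256.
	rawKey := make([]byte, (priv.Curve.Params().N.BitLen()+7)/8)
	priv.D.FillBytes(rawKey)
	block, err := aes.NewCipher(rawKey)
	if err != nil {
		panic(err) // cannot happen for a 32-byte key
	}
	fmt.Println(block.BlockSize()) // 16: block size; the 32-byte key selected AES-256
}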
diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go
index 423e579d7..d531b25cb 100644
--- a/pkg/services/session/storage/temporary/executor.go
+++ b/pkg/services/session/storage/temporary/executor.go
@@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody)
s.mtx.Lock()
s.tokens[key{
tokenID: base58.Encode(uidBytes),
- ownerID: id.EncodeToString(),
+ ownerID: base58.Encode(id.WalletBytes()),
}] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration())
s.mtx.Unlock()
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index c9da6b842..9ae9db9dc 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -41,7 +41,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken
s.mtx.RLock()
t := s.tokens[key{
tokenID: base58.Encode(tokenID),
- ownerID: ownerID.EncodeToString(),
+ ownerID: base58.Encode(ownerID.WalletBytes()),
}]
s.mtx.RUnlock()
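
Reviewer note: both token stores key their maps by a comparable struct, so the (tokenID, ownerID) pair acts as a single map key; struct equality does the compound lookup. A minimal sketch of the pattern, with a placeholder value type and made-up IDs:

package main

import "fmt"

type key struct {
	tokenID string
	ownerID string
}

func main() {
	tokens := map[key]string{} // value stands in for *storage.PrivateToken
	k := key{tokenID: "6gCrN...", ownerID: "NWvwb..."}
	tokens[k] = "private token"
	fmt.Println(tokens[k]) // one lookup for the (tokenID, ownerID) pair
}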
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index 58757ff6d..69cf59405 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -22,7 +22,7 @@ import (
)
func (s *Service) newAPERequest(ctx context.Context, namespace string,
- cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) (aperequest.Request, error) {
schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
if err != nil {
@@ -36,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()),
nativeschema.PropertyKeyActorRole: schemaRole,
}
- reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey)
+ reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey)
if err != nil {
return aperequest.Request{}, err
}
@@ -53,19 +53,15 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
}
- resProps := map[string]string{
- nativeschema.ProperyKeyTreeID: treeID,
- }
-
return aperequest.NewRequest(
schemaMethod,
- aperequest.NewResource(resourceName, resProps),
+ aperequest.NewResource(resourceName, make(map[string]string)),
reqProps,
), nil
}
func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
- container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) error {
namespace := ""
cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
@@ -73,27 +69,28 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
namespace = cntNamespace
}
- request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey)
+ request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey)
if err != nil {
return fmt.Errorf("failed to create ape request: %w", err)
}
- return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{
+ return s.apeChecker.CheckAPE(checkercore.CheckPrm{
Request: request,
Namespace: namespace,
Container: cid,
ContainerOwner: container.Value.Owner(),
PublicKey: publicKey,
BearerToken: bt,
+ SoftAPECheck: false,
})
}
// fillWithUserClaimTags fills the APE request properties with user claim tags fetched from the frostfsid contract by the actor's public key.
-func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
+func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey)
+ props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey)
if err != nil {
return reqProps, err
}
diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go
index 7b209fd47..3f94925b5 100644
--- a/pkg/services/tree/ape_test.go
+++ b/pkg/services/tree/ape_test.go
@@ -37,7 +37,7 @@ type frostfsIDProviderMock struct {
subjectsExtended map[util.Uint160]*client.SubjectExtended
}
-func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
+func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) {
v, ok := f.subjects[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -45,7 +45,7 @@ func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160
return v, nil
}
-func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) {
v, ok := f.subjectsExtended[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -107,45 +107,6 @@ func TestCheckAPE(t *testing.T) {
cid := cid.ID{}
_ = cid.DecodeString(containerID)
- t.Run("treeID rule", func(t *testing.T) {
- los := inmemory.NewInmemoryLocalStorage()
- mcs := inmemory.NewInmemoryMorphRuleChainStorage()
- fid := newFrostfsIDProviderMock(t)
- s := Service{
- cfg: cfg{
- frostfsidSubjectProvider: fid,
- },
- apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
- }
-
- mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.QuotaLimitReached,
- Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindResource,
- Key: nativeschema.ProperyKeyTreeID,
- Value: versionTreeID,
- },
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey())
-
- var chErr *checkercore.ChainRouterError
- require.ErrorAs(t, err, &chErr)
- require.Equal(t, chain.QuotaLimitReached, chErr.Status())
- })
-
t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
los := inmemory.NewInmemoryLocalStorage()
mcs := inmemory.NewInmemoryMorphRuleChainStorage()
@@ -191,7 +152,7 @@ func TestCheckAPE(t *testing.T) {
MatchType: chain.MatchTypeFirstMatch,
})
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
require.NoError(t, err)
})
@@ -240,7 +201,7 @@ func TestCheckAPE(t *testing.T) {
MatchType: chain.MatchTypeFirstMatch,
})
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
require.NoError(t, err)
})
}
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index a11700771..ac80d0e4c 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -10,9 +10,12 @@ import (
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
+ tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials/insecure"
)
type clientCache struct {
@@ -48,7 +51,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
c.Lock()
- ccInt, ok := c.Get(netmapAddr)
+ ccInt, ok := c.LRU.Get(netmapAddr)
c.Unlock()
if ok {
@@ -66,19 +69,14 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- var netAddr network.Address
- if err := netAddr.FromString(netmapAddr); err != nil {
- return nil, err
- }
-
- cc, err := dialTreeService(ctx, netAddr, c.key, c.ds)
+ cc, err := c.dialTreeService(ctx, netmapAddr)
lastTry := time.Now()
c.Lock()
if err != nil {
- c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+ c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
} else {
- c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+ c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
}
c.Unlock()
@@ -88,3 +86,48 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
+
+func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
+ var netAddr network.Address
+ if err := netAddr.FromString(netmapAddr); err != nil {
+ return nil, err
+ }
+
+ opts := []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ ),
+ grpc.WithContextDialer(c.ds.GrpcContextDialer()),
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
+ }
+
+ if !netAddr.IsTLSEnabled() {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ req := &HealthcheckRequest{
+ Body: &HealthcheckRequest_Body{},
+ }
+ if err := SignMessage(req, c.key); err != nil {
+ return nil, err
+ }
+
+ cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ defer cancel()
+ // probe the connection with the signed healthcheck request before returning it
+ if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
+ _ = cc.Close()
+ return nil, err
+ }
+ return cc, nil
+}
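
Reviewer note: elsewhere in this file, get() stores failed dials in the same LRU as successes, with only the timestamp set. A sketch (assumed cooldown constant, simplified types) of why that matters — repeated lookups fail fast instead of re-dialing a dead endpoint on every request:

package treecache

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

type cacheItem struct {
	cc      *grpc.ClientConn // nil records a failed dial attempt
	lastTry time.Time
}

const reconnectCooldown = 15 * time.Second // assumed value

// reusable reports whether a cached connection can be handed out as-is.
func reusable(it cacheItem) bool {
	return it.cc != nil && it.cc.GetState() != connectivity.Shutdown
}

// retryAllowed reports whether a failed endpoint may be dialed again.
func retryAllowed(it cacheItem, now time.Time) bool {
	return it.cc == nil && now.Sub(it.lastTry) >= reconnectCooldown
}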
diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go
index c641a21a2..435257550 100644
--- a/pkg/services/tree/container.go
+++ b/pkg/services/tree/container.go
@@ -2,7 +2,6 @@ package tree
import (
"bytes"
- "context"
"crypto/sha256"
"fmt"
"sync"
@@ -33,13 +32,13 @@ type containerCacheItem struct {
const defaultContainerCacheSize = 10
// getContainerNodes returns the nodes in the container and the position of the local key in the list.
-func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
- nm, err := s.nmSource.GetNetMap(ctx, 0)
+func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
+ nm, err := s.nmSource.GetNetMap(0)
if err != nil {
return nil, -1, fmt.Errorf("can't get netmap: %w", err)
}
- cnr, err := s.cnrSource.Get(ctx, cid)
+ cnr, err := s.cnrSource.Get(cid)
if err != nil {
return nil, -1, fmt.Errorf("can't get container: %w", err)
}
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index e7a13827e..95bdda34b 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -131,7 +131,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) {
t.Run("boltdb forest", func(t *testing.T) {
p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
require.NoError(t, p.Open(context.Background(), 0o644))
- require.NoError(t, p.Init(context.Background()))
+ require.NoError(t, p.Init())
testGetSubTreeOrderAsc(t, p)
})
}
diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go
index 07503f8c3..0f0e4ee57 100644
--- a/pkg/services/tree/metrics.go
+++ b/pkg/services/tree/metrics.go
@@ -6,7 +6,6 @@ type MetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
- AddOperation(string, string)
}
type defaultMetricsRegister struct{}
@@ -14,4 +13,3 @@ type defaultMetricsRegister struct{}
func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {}
-func (defaultMetricsRegister) AddOperation(string, string) {}
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 56cbcc081..1633ae557 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -1,9 +1,7 @@
package tree
import (
- "context"
"crypto/ecdsa"
- "sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
@@ -20,12 +18,12 @@ import (
type ContainerSource interface {
container.Source
- DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error)
+ DeletionInfo(cid.ID) (*container.DelInfo, error)
// List must return the list of all containers in the FrostFS network
// at the moment of the call, or any error that prevents fetching
// container information.
- List(ctx context.Context) ([]cid.ID, error)
+ List() ([]cid.ID, error)
}
type cfg struct {
@@ -42,8 +40,7 @@ type cfg struct {
replicatorWorkerCount int
replicatorTimeout time.Duration
containerCacheSize int
- authorizedKeys atomic.Pointer[[][]byte]
- syncBatchSize int
+ authorizedKeys [][]byte
localOverrideStorage policyengine.LocalOverrideStorage
morphChainStorage policyengine.MorphRuleChainStorageReader
@@ -116,12 +113,6 @@ func WithReplicationWorkerCount(n int) Option {
}
}
-func WithSyncBatchSize(n int) Option {
- return func(c *cfg) {
- c.syncBatchSize = n
- }
-}
-
func WithContainerCacheSize(n int) Option {
return func(c *cfg) {
if n > 0 {
@@ -148,7 +139,10 @@ func WithMetrics(v MetricsRegister) Option {
// keys that have rights to use Tree service.
func WithAuthorizedKeys(keys keys.PublicKeys) Option {
return func(c *cfg) {
- c.authorizedKeys.Store(fromPublicKeys(keys))
+ c.authorizedKeys = nil
+ for _, key := range keys {
+ c.authorizedKeys = append(c.authorizedKeys, key.Bytes())
+ }
}
}
diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go
deleted file mode 100644
index 8f21686df..000000000
--- a/pkg/services/tree/qos.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package tree
-
-import (
- "context"
-
- "google.golang.org/grpc"
-)
-
-var _ TreeServiceServer = (*ioTagAdjust)(nil)
-
-type AdjustIOTag interface {
- AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
-}
-
-type ioTagAdjust struct {
- s TreeServiceServer
- a AdjustIOTag
-}
-
-func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer {
- return &ioTagAdjust{
- s: s,
- a: a,
- }
-}
-
-func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Add(ctx, req)
-}
-
-func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.AddByPath(ctx, req)
-}
-
-func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Apply(ctx, req)
-}
-
-func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.GetNodeByPath(ctx, req)
-}
-
-func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
- ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
- return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{
- sender: srv,
- ServerStream: srv,
- ctxF: func() context.Context { return ctx },
- })
-}
-
-func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
- ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
- return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{
- sender: srv,
- ServerStream: srv,
- ctxF: func() context.Context { return ctx },
- })
-}
-
-func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Healthcheck(ctx, req)
-}
-
-func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Move(ctx, req)
-}
-
-func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Remove(ctx, req)
-}
-
-func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.TreeList(ctx, req)
-}
-
-type qosSend[T any] interface {
- Send(T) error
-}
-
-type qosServerWrapper[T any] struct {
- grpc.ServerStream
- sender qosSend[T]
- ctxF func() context.Context
-}
-
-func (w *qosServerWrapper[T]) Send(resp T) error {
- return w.sender.Send(resp)
-}
-
-func (w *qosServerWrapper[T]) Context() context.Context {
- return w.ctxF()
-}
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 647f8cb30..5bde3ae38 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -6,6 +6,7 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
@@ -19,8 +20,8 @@ var errNoSuitableNode = errors.New("no node was found to execute the request")
func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) {
var resp *Resp
var outErr error
- err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool {
- resp, outErr = callback(c, fCtx, req)
+ err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = callback(c, ctx, req)
return true
})
if err != nil {
@@ -31,7 +32,7 @@ func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapS
// forEachNode executes the callback for each node in the container until the callback returns true.
// Returns errNoSuitableNode if there was no successful attempt to dial any node.
-func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error {
+func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error {
for _, n := range cntNodes {
if bytes.Equal(n.PublicKey(), s.rawPub) {
return nil
@@ -41,15 +42,25 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
var called bool
for _, n := range cntNodes {
var stop bool
- for endpoint := range n.NetworkEndpoints() {
- stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool {
- called = true
- return f(fCtx, c)
- })
- if called {
- break
+ n.IterateNetworkEndpoints(func(endpoint string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
+ trace.WithAttributes(
+ attribute.String("endpoint", endpoint),
+ ))
+ defer span.End()
+
+ c, err := s.cache.get(ctx, endpoint)
+ if err != nil {
+ return false
}
- }
+
+ s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+
+ called = true
+ stop = f(c)
+ return true
+ })
if stop {
return nil
}
@@ -59,19 +70,3 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
}
return nil
}
-
-func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
- trace.WithAttributes(
- attribute.String("endpoint", endpoint),
- ))
- defer span.End()
-
- c, err := s.cache.get(ctx, endpoint)
- if err != nil {
- return false
- }
-
- s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
- return f(ctx, c)
-}
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index ee40884eb..95c8f8013 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -39,7 +40,6 @@ const (
defaultReplicatorCapacity = 64
defaultReplicatorWorkerCount = 64
defaultReplicatorSendTimeout = time.Second * 5
- defaultSyncBatchSize = 1000
)
func (s *Service) localReplicationWorker(ctx context.Context) {
@@ -57,8 +57,8 @@ func (s *Service) localReplicationWorker(ctx context.Context) {
err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
if err != nil {
- s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
- zap.Error(err))
+ s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
+ zap.String("err", err.Error()))
}
span.End()
}
@@ -89,23 +89,41 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
var lastErr error
var lastAddr string
- for addr := range n.NetworkEndpoints() {
+ n.IterateNetworkEndpoints(func(addr string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
+ attribute.String("address", addr),
+ ),
+ )
+ defer span.End()
+
lastAddr = addr
- lastErr = s.apply(ctx, n, addr, req)
- if lastErr == nil {
- break
+
+ c, err := s.cache.get(ctx, addr)
+ if err != nil {
+ lastErr = fmt.Errorf("can't create client: %w", err)
+ return false
}
- }
+
+ ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
+ _, lastErr = c.Apply(ctx, req)
+ cancel()
+
+ return lastErr == nil
+ })
if lastErr != nil {
if errors.Is(lastErr, errRecentlyFailed) {
- s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode,
- zap.String("last_error", lastErr.Error()))
+ s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
} else {
- s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode,
+ s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
zap.String("last_error", lastErr.Error()),
zap.String("address", lastAddr),
- zap.String("key", hex.EncodeToString(n.PublicKey())))
+ zap.String("key", hex.EncodeToString(n.PublicKey())),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
s.metrics.AddReplicateTaskDuration(time.Since(start), false)
return lastErr
@@ -114,26 +132,6 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
return nil
}
-func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
- trace.WithAttributes(
- attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
- attribute.String("address", addr),
- ),
- )
- defer span.End()
-
- c, err := s.cache.get(ctx, addr)
- if err != nil {
- return fmt.Errorf("can't create client: %w", err)
- }
-
- ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
- _, err = c.Apply(ctx, req)
- cancel()
- return err
-}
-
func (s *Service) replicateLoop(ctx context.Context) {
for range s.replicatorWorkerCount {
go s.replicationWorker(ctx)
@@ -153,10 +151,10 @@ func (s *Service) replicateLoop(ctx context.Context) {
return
case op := <-s.replicateCh:
start := time.Now()
- err := s.replicate(ctx, op)
+ err := s.replicate(op)
if err != nil {
- s.log.Error(ctx, logs.TreeErrorDuringReplication,
- zap.Error(err),
+ s.log.Error(logs.TreeErrorDuringReplication,
+ zap.String("err", err.Error()),
zap.Stringer("cid", op.cid),
zap.String("treeID", op.treeID))
}
@@ -165,14 +163,14 @@ func (s *Service) replicateLoop(ctx context.Context) {
}
}
-func (s *Service) replicate(ctx context.Context, op movePair) error {
+func (s *Service) replicate(op movePair) error {
req := newApplyRequest(&op)
err := SignMessage(req, s.key)
if err != nil {
return fmt.Errorf("can't sign data: %w", err)
}
- nodes, localIndex, err := s.getContainerNodes(ctx, op.cid)
+ nodes, localIndex, err := s.getContainerNodes(op.cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -206,7 +204,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
TreeId: op.treeID,
Operation: &LogMove{
ParentId: op.op.Parent,
- Meta: op.op.Bytes(),
+ Meta: op.op.Meta.Bytes(),
ChildId: op.op.Child,
},
},
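
Reviewer note: the replication path above bounds every Apply RPC with s.replicatorTimeout, derived per attempt inside the endpoint loop. The general shape of that pattern, extracted as a sketch (TreeServiceClient and ApplyRequest are the generated types used throughout this package; context and time imports assumed):

func applyWithTimeout(ctx context.Context, c TreeServiceClient, req *ApplyRequest, d time.Duration) error {
	// Derive a bounded context per attempt so one slow peer cannot
	// stall the whole replication loop; cancel releases the timer early.
	ctx, cancel := context.WithTimeout(ctx, d)
	defer cancel()
	_, err := c.Apply(ctx, req)
	return err
}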
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 3994d6973..8097d545c 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -9,15 +9,12 @@ import (
"sync"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
@@ -58,16 +55,14 @@ func New(opts ...Option) *Service {
s.replicatorChannelCapacity = defaultReplicatorCapacity
s.replicatorWorkerCount = defaultReplicatorWorkerCount
s.replicatorTimeout = defaultReplicatorSendTimeout
- s.syncBatchSize = defaultSyncBatchSize
s.metrics = defaultMetricsRegister{}
- s.authorizedKeys.Store(&[][]byte{})
for i := range opts {
opts[i](&s.cfg)
}
if s.log == nil {
- s.log = logger.NewLoggerWrapper(zap.NewNop())
+ s.log = &logger.Logger{Logger: zap.NewNop()}
}
s.cache.init(s.key, s.ds)
@@ -87,7 +82,6 @@ func New(opts ...Option) *Service {
// Start starts the service.
func (s *Service) Start(ctx context.Context) {
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
go s.replicateLoop(ctx)
go s.syncLoop(ctx)
@@ -107,7 +101,6 @@ func (s *Service) Shutdown() {
}
func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
- defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -119,12 +112,12 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
@@ -151,7 +144,6 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
}
func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
- defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -163,12 +155,12 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
@@ -207,7 +199,6 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
- defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -219,12 +210,12 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectDelete)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
@@ -252,7 +243,6 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
// Move applies a client operation to the specified tree and pushes it into the queue
// for replication to other nodes.
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
- defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -264,12 +254,12 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
@@ -296,7 +286,6 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
}
func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
- defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -308,12 +297,12 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
@@ -347,11 +336,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
} else {
var metaValue []KeyValue
for _, kv := range m.Items {
- if slices.Contains(b.GetAttributes(), kv.Key) {
- metaValue = append(metaValue, KeyValue{
- Key: kv.Key,
- Value: kv.Value,
- })
+ for _, attr := range b.GetAttributes() {
+ if kv.Key == attr {
+ metaValue = append(metaValue, KeyValue{
+ Key: kv.Key,
+ Value: kv.Value,
+ })
+ break
+ }
}
}
x.Meta = metaValue
@@ -367,7 +359,6 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
- defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -379,20 +370,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return err
}
- err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return err
}
- ns, pos, err := s.getContainerNodes(srv.Context(), cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetSubTreeClient
var outErr error
- err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
- cli, outErr = c.GetSubTree(fCtx, req)
+ err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
+ cli, outErr = c.GetSubTree(srv.Context(), req)
return true
})
if err != nil {
@@ -414,7 +405,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
type stackItem struct {
values []pilorama.MultiNodeInfo
parent pilorama.MultiNode
- last *pilorama.Cursor
+ last *string
}
func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
@@ -438,8 +429,10 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
}
if ms == nil {
ms = m.Items
- } else if len(m.Items) != 1 {
- return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
+ } else {
+ if len(m.Items) != 1 {
+ return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
+ }
}
ts = append(ts, m.Time)
ps = append(ps, p)
@@ -463,13 +456,14 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
break
}
- var err error
- item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
+ nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
if err != nil {
return err
}
+ item.values = nodes
+ item.last = last
- if len(item.values) == 0 {
+ if len(nodes) == 0 {
stack = stack[:len(stack)-1]
continue
}
@@ -591,8 +585,7 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
}
// Apply locally applies operation from the remote node to the tree.
-func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
- defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx))
+func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) {
err := verifyMessage(req)
if err != nil {
return nil, err
@@ -605,7 +598,7 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse,
key := req.GetSignature().GetKey()
- _, pos, _, err := s.getContainerInfo(ctx, cid, key)
+ _, pos, _, err := s.getContainerInfo(cid, key)
if err != nil {
return nil, err
}
@@ -636,7 +629,6 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse,
}
func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
- defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -648,15 +640,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
return err
}
- ns, pos, err := s.getContainerNodes(srv.Context(), cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetOpLogClient
var outErr error
- err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
- cli, outErr = c.GetOpLog(fCtx, req)
+ err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
+ cli, outErr = c.GetOpLog(srv.Context(), req)
return true
})
if err != nil {
@@ -687,7 +679,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
Body: &GetOpLogResponse_Body{
Operation: &LogMove{
ParentId: lm.Parent,
- Meta: lm.Bytes(),
+ Meta: lm.Meta.Bytes(),
ChildId: lm.Child,
},
},
@@ -701,7 +693,6 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
}
func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
- defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -721,7 +712,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
@@ -763,8 +754,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue {
// getContainerInfo returns the list of container nodes, the position in the container of the node
// with the given public key, and the total number of nodes across all replicas.
-func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
- cntNodes, _, err := s.getContainerNodes(ctx, cid)
+func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
+ cntNodes, _, err := s.getContainerNodes(cid)
if err != nil {
return nil, 0, 0, err
}
@@ -784,15 +775,3 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec
return new(HealthcheckResponse), nil
}
-
-func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) {
- s.authorizedKeys.Store(fromPublicKeys(newKeys))
-}
-
-func fromPublicKeys(keys keys.PublicKeys) *[][]byte {
- buff := make([][]byte, len(keys))
- for i, k := range keys {
- buff[i] = k.Bytes()
- }
- return &buff
-}
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 8221a4546..4fd4a7e1e 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -9,10 +9,8 @@ import (
"fmt"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@@ -38,7 +36,7 @@ var (
// Operation must be one of:
// - 1. ObjectPut;
// - 2. ObjectGet.
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error {
+func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error {
err := verifyMessage(req)
if err != nil {
return err
@@ -49,7 +47,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return err
}
- cnr, err := s.cnrSource.Get(ctx, cid)
+ cnr, err := s.cnrSource.Get(cid)
if err != nil {
return fmt.Errorf("can't get container %s: %w", cid, err)
}
@@ -64,22 +62,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get request role: %w", err)
}
- if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil {
- return apeErr(err)
- }
- return nil
-}
-
-func apeErr(err error) error {
- var chRouterErr *checkercore.ChainRouterError
- if !errors.As(err, &chRouterErr) {
- errServerInternal := &apistatus.ServerInternal{}
- apistatus.WriteInternalServerErr(errServerInternal, err)
- return errServerInternal
- }
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(err.Error())
- return errAccessDenied
+ return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
}
// Returns true iff the operation is read-only and the request was signed
@@ -95,8 +78,8 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
}
key := sign.GetKey()
- for _, currentKey := range *s.authorizedKeys.Load() {
- if bytes.Equal(currentKey, key) {
+ for i := range s.authorizedKeys {
+ if bytes.Equal(s.authorizedKeys[i], key) {
return true, nil
}
}
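
Reviewer note: the plain-slice loop this hunk restores is simpler but cannot be swapped safely at runtime; the variant being removed (see the ReloadAuthorizedKeys/fromPublicKeys deletions earlier in this diff) used an atomic.Pointer snapshot. A self-contained sketch of that lock-free pattern, with hypothetical names:

package main

import (
	"bytes"
	"fmt"
	"sync/atomic"
)

type keyring struct {
	authorized atomic.Pointer[[][]byte]
}

func newKeyring() *keyring {
	k := &keyring{}
	k.authorized.Store(&[][]byte{}) // never leave the pointer nil
	return k
}

// Reload swaps in a fresh snapshot; concurrent readers are unaffected.
func (k *keyring) Reload(keys [][]byte) { k.authorized.Store(&keys) }

// IsAuthorized scans the current snapshot without taking any lock.
func (k *keyring) IsAuthorized(pub []byte) bool {
	for _, cur := range *k.authorized.Load() {
		if bytes.Equal(cur, pub) {
			return true
		}
	}
	return false
}

func main() {
	k := newKeyring()
	k.Reload([][]byte{[]byte("pub1")})
	fmt.Println(k.IsAuthorized([]byte("pub1"))) // true
}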
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index 8815c227f..7bc5002dc 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -31,8 +31,6 @@ import (
"github.com/stretchr/testify/require"
)
-const versionTreeID = "version"
-
type dummyNetmapSource struct {
netmap.Source
}
@@ -41,7 +39,7 @@ type dummySubjectProvider struct {
subjects map[util.Uint160]client.SubjectExtended
}
-func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
+func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) {
res := s.subjects[addr]
return &client.Subject{
PrimaryKey: res.PrimaryKey,
@@ -52,7 +50,7 @@ func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160)
}, nil
}
-func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
+func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
res := s.subjects[addr]
return &res, nil
}
@@ -67,7 +65,7 @@ func (s dummyEpochSource) CurrentEpoch() uint64 {
type dummyContainerSource map[string]*containercore.Container
-func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
+func (s dummyContainerSource) List() ([]cid.ID, error) {
res := make([]cid.ID, 0, len(s))
var cnr cid.ID
@@ -83,7 +81,7 @@ func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
return res, nil
}
-func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) {
+func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
cnt, ok := s[id.String()]
if !ok {
return nil, errors.New("container not found")
@@ -91,7 +89,7 @@ func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercor
return cnt, nil
}
-func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) {
+func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) {
return &containercore.DelInfo{}, nil
}
@@ -152,7 +150,6 @@ func TestMessageSign(t *testing.T) {
apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
- s.cfg.authorizedKeys.Store(&[][]byte{})
rawCID1 := make([]byte, sha256.Size)
cid1.Encode(rawCID1)
@@ -171,26 +168,26 @@ func TestMessageSign(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRW)
t.Run("missing signature, no panic", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
})
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op))
t.Run("invalid CID", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
})
cnr.Value.SetBasicACL(acl.Private)
t.Run("extension disabled", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
})
t.Run("invalid key", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op))
})
t.Run("bearer", func(t *testing.T) {
@@ -203,7 +200,7 @@ func TestMessageSign(t *testing.T) {
t.Run("invalid bearer", func(t *testing.T) {
req.Body.BearerToken = []byte{0xFF}
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer CID", func(t *testing.T) {
@@ -212,7 +209,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer owner", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -220,7 +217,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer signature", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -232,112 +229,20 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bv2.StableMarshal(nil)
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- })
-
- t.Run("omit override within bt", func(t *testing.T) {
- t.Run("personated", func(t *testing.T) {
- bt := testBearerTokenNoOverride()
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override")
- })
-
- t.Run("impersonated", func(t *testing.T) {
- bt := testBearerTokenNoOverride()
- bt.SetImpersonate(true)
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- })
- })
-
- t.Run("invalid override within bearer token", func(t *testing.T) {
- t.Run("personated", func(t *testing.T) {
- bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
- })
-
- t.Run("impersonated", func(t *testing.T) {
- bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
- bt.SetImpersonate(true)
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
- })
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("impersonate", func(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRWExtended)
var bt bearer.Token
- bt.SetExp(10)
- bt.SetImpersonate(true)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
- })
-
- t.Run("impersonate, but target user is still set", func(t *testing.T) {
- var bt bearer.Token
- bt.SetExp(10)
bt.SetImpersonate(true)
- var reqSigner user.ID
- user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey()))
-
- bt.ForUser(reqSigner)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
- })
-
- t.Run("impersonate but invalid signer", func(t *testing.T) {
- var bt bearer.Token
- bt.SetExp(10)
- bt.SetImpersonate(true)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
require.NoError(t, bt.Sign(privs[1].PrivateKey))
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -347,18 +252,18 @@ func TestMessageSign(t *testing.T) {
t.Run("put and get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("only get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[2].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("none", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[3].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
})
}
@@ -377,25 +282,6 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token
return b
}
-func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token {
- var b bearer.Token
- b.SetExp(currentEpoch + 1)
- b.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- },
- Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
- })
-
- return b
-}
-
-func testBearerTokenNoOverride() bearer.Token {
- var b bearer.Token
- b.SetExp(currentEpoch + 1)
- return b
-}
-
func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
ruleGet := chain.Rule{
Status: chain.Allow,
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index af355639f..ce1e72104 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -2,9 +2,7 @@ package tree
import (
"context"
- "crypto/ecdsa"
"crypto/sha256"
- "crypto/tls"
"errors"
"fmt"
"io"
@@ -15,8 +13,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -24,15 +20,12 @@ import (
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -46,7 +39,7 @@ const defaultSyncWorkerCount = 20
// tree IDs from the other container nodes. Returns ErrNotInContainer if the node
// is not included in the container.
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
- nodes, pos, err := s.getContainerNodes(ctx, cid)
+ nodes, pos, err := s.getContainerNodes(cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -78,8 +71,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
var treesToSync []string
var outErr error
- err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool {
- resp, outErr = c.TreeList(fCtx, req)
+ err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(ctx, req)
if outErr != nil {
return false
}
@@ -99,7 +92,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
for _, tid := range treesToSync {
h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
+ s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
continue
@@ -107,7 +100,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
- s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
+ s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
}
@@ -119,7 +112,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
// SynchronizeTree tries to synchronize log starting from the last stored height.
func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error {
- nodes, pos, err := s.getContainerNodes(ctx, cid)
+ nodes, pos, err := s.getContainerNodes(cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -138,9 +131,14 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
}
// mergeOperationStreams merge-sorts the per-node operation streams into a single output stream.
-func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
+func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
defer close(merged)
+ ms := make([]*pilorama.Move, len(streams))
+ for i := range streams {
+ ms[i] = <-streams[i]
+ }
+
// Merging different node streams shuffles incoming operations like this:
//
// x - operation from the stream A
@@ -152,15 +150,6 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
// operation height from the stream B. This height is stored in minStreamedLastHeight.
var minStreamedLastHeight uint64 = math.MaxUint64
- ms := make([]*pilorama.Move, len(streams))
- for i := range streams {
- select {
- case ms[i] = <-streams[i]:
- case <-ctx.Done():
- return minStreamedLastHeight
- }
- }
-
for {
var minTimeMoveTime uint64 = math.MaxUint64
minTimeMoveIndex := -1
@@ -175,11 +164,7 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
break
}
- select {
- case merged <- ms[minTimeMoveIndex]:
- case <-ctx.Done():
- return minStreamedLastHeight
- }
+ merged <- ms[minTimeMoveIndex]
height := ms[minTimeMoveIndex].Time
if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
minStreamedLastHeight = min(minStreamedLastHeight, height)
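The loop above is a plain k-way merge over channels that are each already sorted by operation height. A minimal, self-contained sketch of the same idea (the type and names here are illustrative, not the service's):

package main

import "fmt"

type move struct{ Time uint64 }

// mergeSorted keeps the head of every per-node stream, repeatedly emits the
// head with the smallest Time, and refills that stream, mirroring the loop above.
func mergeSorted(streams []chan *move, merged chan<- *move) {
	defer close(merged)
	heads := make([]*move, len(streams))
	for i := range streams {
		heads[i] = <-streams[i] // nil if the stream is already closed
	}
	for {
		minIdx := -1
		for i, m := range heads {
			if m != nil && (minIdx == -1 || m.Time < heads[minIdx].Time) {
				minIdx = i
			}
		}
		if minIdx == -1 {
			return // all streams drained
		}
		merged <- heads[minIdx]
		heads[minIdx] = <-streams[minIdx] // nil once this stream closes
	}
}

func main() {
	a := make(chan *move, 2)
	b := make(chan *move, 2)
	a <- &move{Time: 1}
	a <- &move{Time: 3}
	close(a)
	b <- &move{Time: 2}
	b <- &move{Time: 4}
	close(b)

	out := make(chan *move)
	go mergeSorted([]chan *move{a, b}, out)
	for m := range out {
		fmt.Println(m.Time) // prints 1 2 3 4
	}
}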
@@ -191,30 +176,38 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string,
operationStream <-chan *pilorama.Move,
-) (uint64, error) {
+) uint64 {
+ errGroup, _ := errgroup.WithContext(ctx)
+ const workersCount = 1024
+ errGroup.SetLimit(workersCount)
+
+ // TreeApply runs concurrently for the operations in the batch. Consider two operations
+ // m1 and m2 in the batch with m1.Time < m2.Time: the engine may apply m2 and then fail
+ // on m1, so the next iteration must restart sync from m1.Time. That height is tracked
+ // in unappliedOperationHeight.
+ var unappliedOperationHeight uint64 = math.MaxUint64
+ var heightMtx sync.Mutex
+
var prev *pilorama.Move
- var batch []*pilorama.Move
for m := range operationStream {
// skip already applied op
if prev != nil && prev.Time == m.Time {
continue
}
prev = m
- batch = append(batch, m)
- if len(batch) == s.syncBatchSize {
- if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
- return batch[0].Time, err
+ errGroup.Go(func() error {
+ if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
+ heightMtx.Lock()
+ unappliedOperationHeight = min(unappliedOperationHeight, m.Time)
+ heightMtx.Unlock()
+ return err
}
- batch = batch[:0]
- }
+ return nil
+ })
}
- if len(batch) > 0 {
- if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
- return batch[0].Time, err
- }
- }
- return math.MaxUint64, nil
+ _ = errGroup.Wait()
+ return unappliedOperationHeight
}
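The concurrency idiom introduced here (fan work out through a bounded errgroup while tracking the smallest input whose application failed) can be isolated as a small sketch; the names are hypothetical, not the service code:

package main

import (
	"math"
	"sync"

	"golang.org/x/sync/errgroup"
)

// applyAll applies items concurrently and returns the smallest item whose
// apply failed, or math.MaxUint64 if everything succeeded; that value is
// exactly the restart height the sync loop needs.
func applyAll(items <-chan uint64, apply func(uint64) error) uint64 {
	var (
		eg      errgroup.Group
		mtx     sync.Mutex
		minFail uint64 = math.MaxUint64
	)
	eg.SetLimit(1024) // same bound as workersCount above

	for it := range items {
		it := it // capture a copy for the goroutine
		eg.Go(func() error {
			if err := apply(it); err != nil {
				mtx.Lock()
				minFail = min(minFail, it)
				mtx.Unlock()
				return err
			}
			return nil
		})
	}
	_ = eg.Wait() // errors are reflected in minFail, matching the code above
	return minFail
}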
func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
@@ -247,14 +240,10 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
Parent: lm.GetParentId(),
Child: lm.GetChildId(),
}
- if err := m.FromBytes(lm.GetMeta()); err != nil {
+ if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
return err
}
- select {
- case opsCh <- m:
- case <-ctx.Done():
- return ctx.Err()
- }
+ opsCh <- m
}
if !errors.Is(err, io.EOF) {
return err
@@ -270,7 +259,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
treeID string, nodes []netmapSDK.NodeInfo,
) uint64 {
- s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
+ s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
errGroup, egCtx := errgroup.WithContext(ctx)
const workersCount = 1024
@@ -283,14 +272,13 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
merged := make(chan *pilorama.Move)
var minStreamedLastHeight uint64
errGroup.Go(func() error {
- minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged)
+ minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged)
return nil
})
var minUnappliedHeight uint64
errGroup.Go(func() error {
- var err error
- minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged)
- return err
+ minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged)
+ return nil
})
var allNodesSynced atomic.Bool
@@ -299,27 +287,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
for i, n := range nodes {
errGroup.Go(func() error {
var nodeSynced bool
- for addr := range n.NetworkEndpoints() {
+ n.IterateNetworkEndpoints(func(addr string) bool {
var a network.Address
if err := a.FromString(addr); err != nil {
- s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- continue
+ s.log.Warn(logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ return false
}
- cc, err := dialTreeService(ctx, a, s.key, s.ds)
+ cc, err := s.createConnection(a)
if err != nil {
- s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- continue
+ s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ return false
}
+ defer cc.Close()
err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
if err != nil {
- s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
+ s.log.Warn(logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
}
nodeSynced = err == nil
- _ = cc.Close()
- break
- }
+ return true
+ })
close(nodeOperationStreams[i])
if !nodeSynced {
allNodesSynced.Store(false)
@@ -329,7 +317,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
}
if err := errGroup.Wait(); err != nil {
allNodesSynced.Store(false)
- s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
+ s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
}
newHeight := minStreamedLastHeight
@@ -344,60 +332,19 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return from
}
-func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) {
- cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer()))
- if err != nil {
- return nil, err
- }
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- defer cancel()
-
- req := &HealthcheckRequest{
- Body: &HealthcheckRequest_Body{},
- }
- if err := SignMessage(req, key); err != nil {
- return nil, err
- }
-
- // perform some request to check connection
- if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
- _ = cc.Close()
- return nil, err
- }
- return cc, nil
-}
-
-func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
- host, isTLS, err := client.ParseURI(a.URIAddr())
- if err != nil {
- return nil, err
- }
-
- creds := insecure.NewCredentials()
- if isTLS {
- creds = credentials.NewTLS(&tls.Config{})
- }
-
- defaultOpts := []grpc.DialOption{
+func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
+ return grpc.NewClient(a.URIAddr(),
grpc.WithChainUnaryInterceptor(
- qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
- tracing_grpc.NewUnaryClientInterceptor(),
- tagging.NewUnaryClientInterceptor(),
+ tracing_grpc.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
- qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
metrics.NewStreamClientInterceptor(),
tracing_grpc.NewStreamClientInterceptor(),
- tagging.NewStreamClientInterceptor(),
),
- grpc.WithTransportCredentials(creds),
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- grpc.WithDisableServiceConfig(),
- }
-
- return grpc.NewClient(host, append(defaultOpts, opts...)...)
+ )
}
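Two properties of the reverted client are worth spelling out: grpc.NewClient does not dial eagerly, and grpc.WaitForReady(true) makes the first RPC block until the transport is ready (or its context expires) rather than fail fast. If a caller still wants the eager probe that the removed dialTreeService performed, it can be replayed using only names visible in this diff; a sketch, assuming it lives in the same package:

// Probe a lazily created connection with a signed Healthcheck, as the
// removed dialTreeService used to do.
func (s *Service) dialAndProbe(ctx context.Context, a network.Address) (*grpc.ClientConn, error) {
	cc, err := s.createConnection(a)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
	defer cancel()

	req := &HealthcheckRequest{Body: &HealthcheckRequest_Body{}}
	if err := SignMessage(req, s.key); err != nil {
		_ = cc.Close()
		return nil, err
	}
	// WaitForReady(true): this call blocks until connected or ctx times out.
	if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
		_ = cc.Close()
		return nil, err
	}
	return cc, nil
}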
// ErrAlreadySyncing is returned when a service synchronization has already
@@ -437,25 +384,25 @@ func (s *Service) syncLoop(ctx context.Context) {
return
case <-s.syncChan:
ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
- s.log.Info(ctx, logs.TreeSyncingTrees)
+ s.log.Debug(logs.TreeSyncingTrees)
start := time.Now()
- cnrs, err := s.cnrSource.List(ctx)
+ cnrs, err := s.cfg.cnrSource.List()
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
+ s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
s.metrics.AddSyncDuration(time.Since(start), false)
span.End()
break
}
- newMap, cnrsToSync := s.containersToSync(ctx, cnrs)
+ newMap, cnrsToSync := s.containersToSync(cnrs)
s.syncContainers(ctx, cnrsToSync)
s.removeContainers(ctx, newMap)
- s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)
+ s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
s.metrics.AddSyncDuration(time.Since(start), true)
span.End()
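Stripped of the business logic, the instrumentation pattern this loop restores is one tracing span per sync round plus a duration metric carrying a success flag; a skeleton with a hypothetical doSync standing in for the list/sync/remove steps:

for {
	select {
	case <-ctx.Done():
		return
	case <-s.syncChan:
		ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
		start := time.Now()

		ok := doSync(ctx) // hypothetical: List + syncContainers + removeContainers

		s.metrics.AddSyncDuration(time.Since(start), ok)
		span.End()
	}
}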
@@ -475,19 +422,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
err := s.syncPool.Submit(func() {
defer wg.Done()
- s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
err := s.synchronizeAllTrees(ctx, cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
+ s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
return
}
- s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
})
if err != nil {
wg.Done()
- s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
+ s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
zap.Stringer("cid", cnr),
zap.Error(err))
if errors.Is(err, ants.ErrPoolClosed) {
@@ -511,9 +458,9 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
continue
}
- existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr)
+ existed, err := containerCore.WasRemoved(s.cnrSource, cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
+ s.log.Error(logs.TreeCouldNotCheckIfContainerExisted,
zap.Stringer("cid", cnr),
zap.Error(err))
} else if existed {
@@ -525,25 +472,25 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
}
for _, cnr := range removed {
- s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
err := s.DropTree(ctx, cnr, "")
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
+ s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
zap.Stringer("cid", cnr),
zap.Error(err))
}
}
}
-func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
+func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
cnrsToSync := make([]cid.ID, 0, len(cnrs))
for _, cnr := range cnrs {
- _, pos, err := s.getContainerNodes(ctx, cnr)
+ _, pos, err := s.getContainerNodes(cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes,
+ s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
zap.Stringer("cid", cnr),
zap.Error(err))
continue
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 87d419408..497d90554 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -1,7 +1,6 @@
package tree
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -65,7 +64,7 @@ func Test_mergeOperationStreams(t *testing.T) {
merged := make(chan *pilorama.Move, 1)
min := make(chan uint64)
go func() {
- min <- mergeOperationStreams(context.Background(), nodeOpChans, merged)
+ min <- mergeOperationStreams(nodeOpChans, merged)
}()
var res []uint64
diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go
index 66581878a..547c8d50b 100644
--- a/pkg/util/attributes/parser_test.go
+++ b/pkg/util/attributes/parser_test.go
@@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) {
mExp = mSrc
}
- for key, value := range node.Attributes() {
+ node.IterateAttributes(func(key, value string) {
v, ok := mExp[key]
require.True(t, ok)
require.Equal(t, value, v)
delete(mExp, key)
- }
+ })
require.Empty(t, mExp)
}
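The callback-style API the test reverts to composes easily; a minimal sketch, assuming only the IterateAttributes(func(key, value string)) signature used above on a netmap node:

// Collect every attribute into a map via the callback.
attrs := make(map[string]string)
node.IterateAttributes(func(key, value string) {
	attrs[key] = value
})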
diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go
index 8569ec734..a9877e007 100644
--- a/pkg/util/http/calls.go
+++ b/pkg/util/http/calls.go
@@ -32,8 +32,8 @@ func (x *Server) Serve() error {
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to the Serve method will have no effect.
-func (x *Server) Shutdown(ctx context.Context) error {
- ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout)
+func (x *Server) Shutdown() error {
+ ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout)
err := x.srv.Shutdown(ctx)
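The reverted Shutdown follows the standard net/http idiom: derive a bounded context that is independent of any caller, so in-flight requests get a fixed grace period. The same idiom in isolation (shutdownTimeout is a stand-in for the server's configured value):

package server

import (
	"context"
	"net/http"
	"time"
)

const shutdownTimeout = 5 * time.Second

func shutdown(srv *http.Server) error {
	// A fresh background context bounds the drain time regardless of the caller.
	ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
	defer cancel()
	return srv.Shutdown(ctx) // stops accepting, then waits for active requests
}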
diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go
index 2589ab786..923412a7f 100644
--- a/pkg/util/http/server.go
+++ b/pkg/util/http/server.go
@@ -76,7 +76,8 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server {
o(c)
}
- if c.shutdownTimeout <= 0 {
+ switch {
+ case c.shutdownTimeout <= 0:
panicOnOptValue("shutdown timeout", c.shutdownTimeout)
}
diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go
index 6337039a9..b2942b52a 100644
--- a/pkg/util/keyer/dashboard.go
+++ b/pkg/util/keyer/dashboard.go
@@ -6,7 +6,6 @@ import (
"os"
"text/tabwriter"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -105,7 +104,9 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) {
func base58ToHex(data string) string {
val, err := base58.Decode(data)
- assert.NoError(err, "produced incorrect base58 value")
+ if err != nil {
+ panic("produced incorrect base58 value")
+ }
return hex.EncodeToString(val)
}
diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go
deleted file mode 100644
index 413b1d9aa..000000000
--- a/pkg/util/logger/log.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package logger
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
- qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "go.uber.org/zap"
-)
-
-func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Debug(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Info(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Warn(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Error(msg, appendContext(ctx, fields...)...)
-}
-
-func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
- if traceID := tracing.GetTraceID(ctx); traceID != "" {
- fields = append(fields, zap.String("trace_id", traceID))
- }
- if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined {
- fields = append(fields, zap.String("io_tag", ioTag))
- }
- return fields
-}
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index a1998cb1a..4b60f02de 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -2,7 +2,6 @@ package logger
import (
"fmt"
- "time"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/ssgreg/journald"
@@ -13,10 +12,8 @@ import (
// Logger represents a component
// for writing messages to log.
type Logger struct {
- z *zap.Logger
- c zapcore.Core
- t Tag
- w bool
+ *zap.Logger
+ lvl zap.AtomicLevel
}
// Prm groups Logger's parameters.
@@ -25,8 +22,16 @@ type Logger struct {
// Parameters that have been connected to the Logger support
// changing its configuration at runtime.
//
-// See also Logger.Reload, SetLevelString.
+// Passing an already connected Prm to NewLogger again connects it to the
+// newly constructed Logger instance.
+//
+// See also Reload, SetLevelString.
type Prm struct {
+ // link to the created Logger
+ // instance; used for a runtime
+ // reconfiguration
+ _log *Logger
+
// support runtime rereading
level zapcore.Level
@@ -38,12 +43,6 @@ type Prm struct {
// PrependTimestamp specifies whether to prepend a timestamp in the log
PrependTimestamp bool
-
- // Options for zap.Logger
- Options []zap.Option
-
- // map of tag's bit masks to log level, overrides lvl
- tl map[Tag]zapcore.Level
}
const (
@@ -73,10 +72,20 @@ func (p *Prm) SetDestination(d string) error {
return nil
}
-// SetTags parses list of tags with log level.
-func (p *Prm) SetTags(tags [][]string) (err error) {
- p.tl, err = parseTags(tags)
- return err
+// Reload reloads configuration of a connected instance of the Logger.
+// Panics if no connection has been performed.
+// Returns any reconfiguration error from the Logger directly.
+func (p Prm) Reload() error {
+ if p._log == nil {
+ // incorrect logger usage
+ panic("parameters are not connected to any Logger")
+ }
+
+ return p._log.reload(p)
+}
+
+func defaultPrm() *Prm {
+ return new(Prm)
}
// NewLogger constructs a new zap logger instance. Constructing with nil
@@ -90,7 +99,10 @@ func (p *Prm) SetTags(tags [][]string) (err error) {
// - ISO8601 time encoding.
//
// Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm Prm) (*Logger, error) {
+func NewLogger(prm *Prm) (*Logger, error) {
+ if prm == nil {
+ prm = defaultPrm()
+ }
switch prm.dest {
case DestinationUndefined, DestinationStdout:
return newConsoleLogger(prm)
@@ -101,9 +113,11 @@ func NewLogger(prm Prm) (*Logger, error) {
}
}
-func newConsoleLogger(prm Prm) (*Logger, error) {
+func newConsoleLogger(prm *Prm) (*Logger, error) {
+ lvl := zap.NewAtomicLevelAt(prm.level)
+
c := zap.NewProductionConfig()
- c.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+ c.Level = lvl
c.Encoding = "console"
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
@@ -115,23 +129,25 @@ func newConsoleLogger(prm Prm) (*Logger, error) {
c.EncoderConfig.TimeKey = ""
}
- opts := []zap.Option{
+ lZap, err := c.Build(
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- zap.AddCallerSkip(1),
- }
- opts = append(opts, prm.Options...)
- lZap, err := c.Build(opts...)
+ )
if err != nil {
return nil, err
}
- l := &Logger{z: lZap, c: lZap.Core()}
- l = l.WithTag(TagMain)
+
+ l := &Logger{Logger: lZap, lvl: lvl}
+ prm._log = l
return l, nil
}
-func newJournaldLogger(prm Prm) (*Logger, error) {
+func newJournaldLogger(prm *Prm) (*Logger, error) {
+ lvl := zap.NewAtomicLevelAt(prm.level)
+
c := zap.NewProductionConfig()
+ c.Level = lvl
+ c.Encoding = "console"
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
@@ -144,100 +160,22 @@ func newJournaldLogger(prm Prm) (*Logger, error) {
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
- core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields)
+ core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
- var samplerOpts []zapcore.SamplerOption
- if c.Sampling.Hook != nil {
- samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook))
- }
- samplingCore := zapcore.NewSamplerWithOptions(
- coreWithContext,
- time.Second,
- c.Sampling.Initial,
- c.Sampling.Thereafter,
- samplerOpts...,
- )
- opts := []zap.Option{
- zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- zap.AddCallerSkip(1),
- }
- opts = append(opts, prm.Options...)
- lZap := zap.New(samplingCore, opts...)
- l := &Logger{z: lZap, c: lZap.Core()}
- l = l.WithTag(TagMain)
+ lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
+
+ l := &Logger{Logger: lZap, lvl: lvl}
+ prm._log = l
return l, nil
}
-// With creates a child logger with new fields without affecting the parent.
-// Panics if the tag is unset.
-func (l *Logger) With(fields ...zap.Field) *Logger {
- if l.t == 0 {
- panic("tag is unset")
- }
- c := *l
- c.z = l.z.With(fields...)
- // With called under the logger
- c.w = true
- return &c
-}
-
-type core struct {
- c zapcore.Core
- l zap.AtomicLevel
-}
-
-func (c *core) Enabled(lvl zapcore.Level) bool {
- return c.l.Enabled(lvl)
-}
-
-func (c *core) With(fields []zapcore.Field) zapcore.Core {
- clone := *c
- clone.c = clone.c.With(fields)
- return &clone
-}
-
-func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- return c.c.Check(e, ce)
-}
-
-func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error {
- return c.c.Write(e, fields)
-}
-
-func (c *core) Sync() error {
- return c.c.Sync()
-}
-
-// WithTag is equivalent to calling [NewLogger] with the same parameters for the current logger.
-// Panics if an unsupported tag is provided.
-func (l *Logger) WithTag(tag Tag) *Logger {
- if tag == 0 || tag > Tag(len(_Tag_index)-1) {
- panic("unsupported tag " + tag.String())
- }
- if l.w {
- panic("unsupported operation for the logger's state")
- }
- c := *l
- c.t = tag
- c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
- return &core{
- c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
- l: tagToLogLevel[tag],
- }
- }))
- return &c
-}
-
-func NewLoggerWrapper(z *zap.Logger) *Logger {
- return &Logger{
- z: z.WithOptions(zap.AddCallerSkip(1)),
- t: TagMain,
- c: z.Core(),
- }
+func (l *Logger) reload(prm Prm) error {
+ l.lvl.SetLevel(prm.level)
+ return nil
}
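End to end, the restored connection mechanism gives runtime level changes with three calls; a sketch using only NewLogger, SetLevelString and Reload as they appear in this diff:

var prm logger.Prm
if err := prm.SetLevelString("info"); err != nil {
	return err
}
l, err := logger.NewLogger(&prm) // connects prm to l via prm._log
if err != nil {
	return err
}
l.Info("running at info level")

// Later, e.g. on SIGHUP: raise verbosity on the live logger.
if err := prm.SetLevelString("debug"); err != nil {
	return err
}
if err := prm.Reload(); err != nil { // delegates to l.reload, which moves the atomic level
	return err
}
l.Debug("now visible")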
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
deleted file mode 100644
index b867ee6cc..000000000
--- a/pkg/util/logger/logger_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package logger
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest/observer"
-)
-
-func BenchmarkLogger(b *testing.B) {
- ctx := context.Background()
- m := map[string]Prm{}
-
- prm := Prm{}
- require.NoError(b, prm.SetLevelString("debug"))
- m["logging enabled"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("error"))
- m["logging disabled"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("error"))
- require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
- m["logging enabled via tags"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("debug"))
- require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
- m["logging disabled via tags"] = prm
-
- for k, v := range m {
- b.Run(k, func(b *testing.B) {
- logger, err := createLogger(v)
- require.NoError(b, err)
- UpdateLevelForTags(v)
- b.ResetTimer()
- b.ReportAllocs()
- for range b.N {
- logger.Info(ctx, "test info")
- }
- })
- }
-}
-
-type testCore struct {
- core zapcore.Core
-}
-
-func (c *testCore) Enabled(lvl zapcore.Level) bool {
- return c.core.Enabled(lvl)
-}
-
-func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
- c.core = c.core.With(fields)
- return c
-}
-
-func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- return ce.AddCore(e, c)
-}
-
-func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
- return nil
-}
-
-func (c *testCore) Sync() error {
- return c.core.Sync()
-}
-
-func createLogger(prm Prm) (*Logger, error) {
- prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
- tc := testCore{core: core}
- return &tc
- })}
- return NewLogger(prm)
-}
-
-func TestLoggerOutput(t *testing.T) {
- obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
-
- prm := Prm{}
- require.NoError(t, prm.SetLevelString("debug"))
- prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core {
- return obs
- })}
- loggerMain, err := NewLogger(prm)
- require.NoError(t, err)
- UpdateLevelForTags(prm)
-
- loggerMainWith := loggerMain.With(zap.String("key", "value"))
-
- require.Panics(t, func() {
- loggerMainWith.WithTag(TagShard)
- })
- loggerShard := loggerMain.WithTag(TagShard)
- loggerShard = loggerShard.With(zap.String("key1", "value1"))
-
- loggerMorph := loggerMain.WithTag(TagMorph)
- loggerMorph = loggerMorph.With(zap.String("key2", "value2"))
-
- ctx := context.Background()
- loggerMain.Debug(ctx, "main")
- loggerMainWith.Debug(ctx, "main with")
- loggerShard.Debug(ctx, "shard")
- loggerMorph.Debug(ctx, "morph")
-
- require.Len(t, logs.All(), 4)
- require.Len(t, logs.FilterFieldKey("key").All(), 1)
- require.Len(t, logs.FilterFieldKey("key1").All(), 1)
- require.Len(t, logs.FilterFieldKey("key2").All(), 1)
- require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2)
- require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1)
- require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1)
-}
diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result
deleted file mode 100644
index 612fa2967..000000000
--- a/pkg/util/logger/logger_test.result
+++ /dev/null
@@ -1,46 +0,0 @@
-goos: linux
-goarch: amd64
-pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger
-cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
-BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op
-PASS
-ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s
diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go
deleted file mode 100644
index 1b98f2e62..000000000
--- a/pkg/util/logger/tag_string.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT.
-
-package logger
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[TagMain-1]
- _ = x[TagMorph-2]
- _ = x[TagGrpcSvc-3]
- _ = x[TagIr-4]
- _ = x[TagProcessor-5]
- _ = x[TagEngine-6]
- _ = x[TagBlobovnicza-7]
- _ = x[TagBlobovniczaTree-8]
- _ = x[TagBlobstor-9]
- _ = x[TagFSTree-10]
- _ = x[TagGC-11]
- _ = x[TagShard-12]
- _ = x[TagWriteCache-13]
- _ = x[TagDeleteSvc-14]
- _ = x[TagGetSvc-15]
- _ = x[TagSearchSvc-16]
- _ = x[TagSessionSvc-17]
- _ = x[TagTreeSvc-18]
- _ = x[TagPolicer-19]
- _ = x[TagReplicator-20]
-}
-
-const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator"
-
-var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148}
-
-func (i Tag) String() string {
- i -= 1
- if i >= Tag(len(_Tag_index)-1) {
- return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")"
- }
- return _Tag_name[_Tag_index[i]:_Tag_index[i+1]]
-}
diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go
deleted file mode 100644
index a5386707e..000000000
--- a/pkg/util/logger/tags.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package logger
-
-import (
- "fmt"
- "strings"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-//go:generate stringer -type Tag -linecomment
-
-type Tag uint8
-
-const (
- _ Tag = iota //
- TagMain // main
- TagMorph // morph
- TagGrpcSvc // grpcsvc
- TagIr // ir
- TagProcessor // processor
- TagEngine // engine
- TagBlobovnicza // blobovnicza
- TagBlobovniczaTree // blobovniczatree
- TagBlobstor // blobstor
- TagFSTree // fstree
- TagGC // gc
- TagShard // shard
- TagWriteCache // writecache
- TagDeleteSvc // deletesvc
- TagGetSvc // getsvc
- TagSearchSvc // searchsvc
- TagSessionSvc // sessionsvc
- TagTreeSvc // treesvc
- TagPolicer // policer
- TagReplicator // replicator
-
- defaultLevel = zapcore.InfoLevel
-)
-
-var (
- tagToLogLevel = map[Tag]zap.AtomicLevel{}
- stringToTag = map[string]Tag{}
-)
-
-func init() {
- for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
- tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
- stringToTag[i.String()] = i
- }
-}
-
-// parseTags returns:
-// - a map (always instantiated) from tag to the custom log level for that tag;
-// - an error if one occurred (in which case the map is empty).
-func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
- m := make(map[Tag]zapcore.Level)
- if len(raw) == 0 {
- return m, nil
- }
- for _, item := range raw {
- str, level := item[0], item[1]
- if len(level) == 0 {
- // It is not necessary to parse tags without level,
- // because default log level will be used.
- continue
- }
- var l zapcore.Level
- err := l.UnmarshalText([]byte(level))
- if err != nil {
- return nil, err
- }
- tmp := strings.Split(str, ",")
- for _, tagStr := range tmp {
- tag, ok := stringToTag[strings.TrimSpace(tagStr)]
- if !ok {
- return nil, fmt.Errorf("unsupported tag %s", str)
- }
- m[tag] = l
- }
- }
- return m, nil
-}
-
-func UpdateLevelForTags(prm Prm) {
- for k, v := range tagToLogLevel {
- nk, ok := prm.tl[k]
- if ok {
- v.SetLevel(nk)
- } else {
- v.SetLevel(prm.level)
- }
- }
-}
diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go
index b5b0a31eb..f93756d17 100644
--- a/pkg/util/logger/test/logger.go
+++ b/pkg/util/logger/test/logger.go
@@ -11,10 +11,9 @@ import (
// NewLogger creates a new logger.
func NewLogger(t testing.TB) *logger.Logger {
- return logger.NewLoggerWrapper(
- zaptest.NewLogger(t,
- zaptest.Level(zapcore.DebugLevel),
- zaptest.WrapOptions(zap.Development(), zap.AddCaller()),
- ),
- )
+ var l logger.Logger
+ l.Logger = zaptest.NewLogger(t,
+ zaptest.Level(zapcore.DebugLevel),
+ zaptest.WrapOptions(zap.Development(), zap.AddCaller()))
+ return &l
}
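A test can then use the logger directly, since the reverted Logger embeds *zap.Logger; a sketch, assuming the package is imported under the name test:

func TestComponent(t *testing.T) {
	l := test.NewLogger(t) // writes through zaptest, so output is attached to t
	l.Info("component started", zap.String("step", "init"))
}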
diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go
deleted file mode 100644
index 7373e538f..000000000
--- a/pkg/util/testing/netmap_source.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package testing
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-var (
- errInvalidDiff = errors.New("invalid diff")
- errNetmapNotFound = errors.New("netmap not found")
-)
-
-type TestNetmapSource struct {
- Netmaps map[uint64]*netmap.NetMap
- CurrentEpoch uint64
-}
-
-func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
- if diff >= s.CurrentEpoch {
- return nil, errInvalidDiff
- }
- return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
-}
-
-func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
- if nm, found := s.Netmaps[epoch]; found {
- return nm, nil
- }
- return nil, errNetmapNotFound
-}
-
-func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
- return s.CurrentEpoch, nil
-}
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
index 39a420358..f2f8881cf 100644
--- a/scripts/populate-metabase/internal/generate.go
+++ b/scripts/populate-metabase/internal/generate.go
@@ -1,10 +1,8 @@
package internal
import (
- cryptorand "crypto/rand"
"crypto/sha256"
"fmt"
- "math/rand"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -16,13 +14,14 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "golang.org/x/exp/rand"
)
func GeneratePayloadPool(count uint, size uint) [][]byte {
var pool [][]byte
for range count {
payload := make([]byte, size)
- _, _ = cryptorand.Read(payload)
+ _, _ = rand.Read(payload)
pool = append(pool, payload)
}
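Note the swap from crypto/rand to golang.org/x/exp/rand here: the latter is a fast, deterministic, non-cryptographic PRNG, which is fine for synthetic payloads and makes runs reproducible when seeded. A sketch (the seed value is arbitrary):

package main

import (
	"fmt"

	"golang.org/x/exp/rand"
)

func main() {
	// A fixed seed yields identical payload pools across runs.
	r := rand.New(rand.NewSource(42))
	payload := make([]byte, 8)
	_, _ = r.Read(payload) // fine for test data; never use for keys or tokens
	fmt.Printf("%x\n", payload)
}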
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
index fafe61eaa..4da23a295 100644
--- a/scripts/populate-metabase/internal/populate.go
+++ b/scripts/populate-metabase/internal/populate.go
@@ -31,10 +31,13 @@ func PopulateWithObjects(
for range count {
obj := factory()
- id := fmt.Appendf(nil, "%c/%c/%c",
+
+ id := []byte(fmt.Sprintf(
+ "%c/%c/%c",
digits[rand.Int()%len(digits)],
digits[rand.Int()%len(digits)],
- digits[rand.Int()%len(digits)])
+ digits[rand.Int()%len(digits)],
+ ))
prm := meta.PutPrm{}
prm.SetObject(obj)
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
index 8c4ea41ad..6f6b233cf 100644
--- a/scripts/populate-metabase/main.go
+++ b/scripts/populate-metabase/main.go
@@ -91,15 +91,15 @@ func populate() (err error) {
return fmt.Errorf("couldn't open the metabase: %w", err)
}
defer func() {
- if errOnClose := db.Close(ctx); errOnClose != nil {
+ if errOnClose := db.Close(); errOnClose != nil {
err = errors.Join(
err,
- fmt.Errorf("couldn't close the metabase: %w", db.Close(ctx)),
+ fmt.Errorf("couldn't close the metabase: %w", db.Close()),
)
}
}()
- if err = db.Init(ctx); err != nil {
+ if err = db.Init(); err != nil {
return fmt.Errorf("couldn't init the metabase: %w", err)
}