diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
new file mode 100644
index 000000000..4234de160
--- /dev/null
+++ b/.ci/Jenkinsfile
@@ -0,0 +1,81 @@
+def golang = ['1.23', '1.24']
+def golangDefault = "golang:${golang.last()}"
+
+async {
+
+    for (version in golang) {
+        def go = version
+
+        task("test/go${go}") {
+            container("golang:${go}") {
+                sh 'make test'
+            }
+        }
+
+        task("build/go${go}") {
+            container("golang:${go}") {
+                for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
+                    sh """
+                        make bin/frostfs-${app}
+                        bin/frostfs-${app} --version
+                    """
+                }
+            }
+        }
+    }
+
+    task('test/race') {
+        container(golangDefault) {
+            sh 'make test GOFLAGS="-count=1 -race"'
+        }
+    }
+
+    task('lint') {
+        container(golangDefault) {
+            sh 'make lint-install lint'
+        }
+    }
+
+    task('staticcheck') {
+        container(golangDefault) {
+            sh 'make staticcheck-install staticcheck-run'
+        }
+    }
+
+    task('gopls') {
+        container(golangDefault) {
+            sh 'make gopls-install gopls-run'
+        }
+    }
+
+    task('gofumpt') {
+        container(golangDefault) {
+            sh '''
+                make fumpt-install
+                make fumpt
+                git diff --exit-code --quiet
+            '''
+        }
+    }
+
+    task('vulncheck') {
+        container(golangDefault) {
+            sh '''
+                go install golang.org/x/vuln/cmd/govulncheck@latest
+                govulncheck ./...
+            '''
+        }
+    }
+
+    task('pre-commit') {
+        dockerfile("""
+            FROM ${golangDefault}
+            RUN apt update && \
+                apt install -y --no-install-recommends pre-commit
+            """) {
+            withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
+                sh 'pre-commit run --color=always --hook-stage=manual --all-files'
+            }
+        }
+    }
+}
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md
similarity index 100%
rename from .github/ISSUE_TEMPLATE/bug_report.md
rename to .forgejo/ISSUE_TEMPLATE/bug_report.md
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.forgejo/ISSUE_TEMPLATE/config.yml
similarity index 100%
rename from .github/ISSUE_TEMPLATE/config.yml
rename to .forgejo/ISSUE_TEMPLATE/config.yml
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.forgejo/ISSUE_TEMPLATE/feature_request.md
similarity index 100%
rename from .github/ISSUE_TEMPLATE/feature_request.md
rename to .forgejo/ISSUE_TEMPLATE/feature_request.md
diff --git a/.github/logo.svg b/.forgejo/logo.svg
similarity index 100%
rename from .github/logo.svg
rename to .forgejo/logo.svg
diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml
index ce2d64dd9..d568b9607 100644
--- a/.forgejo/workflows/build.yml
+++ b/.forgejo/workflows/build.yml
@@ -1,6 +1,10 @@
 name: Build
-on: [pull_request]
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   build:
@@ -8,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.23', '1.24' ]
 
     steps:
       - uses: actions/checkout@v3
diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml
index 7c5af8410..190d7764a 100644
--- a/.forgejo/workflows/dco.yml
+++ b/.forgejo/workflows/dco.yml
@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.24'
 
       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml
new file mode 100644
index 000000000..fe91d65f9
--- /dev/null
+++ b/.forgejo/workflows/oci-image.yml
@@ -0,0 +1,28 @@
+name: OCI image
+
+on:
+  push:
+  workflow_dispatch:
+
+jobs:
+  image:
+    name: Build container images
+    runs-on: docker
+    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
+    steps:
+      - name: Clone git repo
+        uses: actions/checkout@v3
+
+      - name: Build OCI image
+        run: make images
+
+      - name: Push image to OCI registry
+        run: |
+          echo "$REGISTRY_PASSWORD" \
+            | docker login --username truecloudlab --password-stdin git.frostfs.info
+          make push-images
+        if: >-
+          startsWith(github.ref, 'refs/tags/v') &&
+          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
+        env:
+          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml
index 8b06a2fdf..c2e293175 100644
--- a/.forgejo/workflows/pre-commit.yml
+++ b/.forgejo/workflows/pre-commit.yml
@@ -1,5 +1,10 @@
 name: Pre-commit hooks
-on: [pull_request]
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   precommit:
@@ -16,7 +21,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.23
+          go-version: 1.24
       - name: Set up Python
         run: |
           apt update
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index 07ba5c268..f3f5432ce 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -1,5 +1,10 @@
 name: Tests and linters
-on: [pull_request]
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   lint:
@@ -11,7 +16,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.24'
           cache: true
 
       - name: Install linters
@@ -25,7 +30,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.23', '1.24' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
@@ -48,7 +53,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.24'
           cache: true
 
       - name: Run tests
@@ -63,7 +68,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.24'
           cache: true
 
       - name: Install staticcheck
@@ -99,7 +104,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.24'
           cache: true
 
       - name: Install gofumpt
diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml
index 2951a8059..bc94792d8 100644
--- a/.forgejo/workflows/vulncheck.yml
+++ b/.forgejo/workflows/vulncheck.yml
@@ -1,5 +1,10 @@
 name: Vulncheck
-on: [pull_request]
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   vulncheck:
@@ -13,7 +18,8 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.24'
+          check-latest: true
 
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/.golangci.yml b/.golangci.yml
index 57e3b4494..e3ec09f60 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,93 +1,107 @@
-# This file contains all available configuration options
-# with their default values.
-
-# options for analysis running
+version: "2"
 run:
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 20m
-
-  # include test files or not, default is true
   tests: false
-
-# output configuration options
 output:
-  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
   formats:
-    - format: tab
-
-# all available settings of specific linters
-linters-settings:
-  exhaustive:
-    # indicates that switch statements are to be considered exhaustive if a
-    # 'default' case is present, even if all enum members aren't listed in the
-    # switch
-    default-signifies-exhaustive: true
-  govet:
-    # report about shadowed variables
-    check-shadowing: false
-  staticcheck:
-    checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
-  funlen:
-    lines: 80 # default 60
-    statements: 60 # default 40
-  gocognit:
-    min-complexity: 40 # default 30
-  importas:
-    no-unaliased: true
-    no-extra-aliases: false
-    alias:
-      pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
-      alias: objectSDK
-  unused:
-    field-writes-are-uses: false
-    exported-fields-are-used: false
-    local-variables-are-used: false
-  custom:
-    truecloudlab-linters:
-      path: bin/linters/external_linters.so
-      original-url: git.frostfs.info/TrueCloudLab/linters.git
-      settings:
-        noliteral:
-          target-methods : ["reportFlushError", "reportError"]
-          disable-packages: ["codes", "err", "res","exec"]
-          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-
+    tab:
+      path: stdout
+      colors: false
 linters:
+  default: none
   enable:
-    # mandatory linters
-    - govet
-    - revive
-
-    # some default golangci-lint linters
-    - errcheck
-    - gosimple
-    - godot
-    - ineffassign
-    - staticcheck
-    - typecheck
-    - unused
-
-    # extra linters
     - bidichk
-    - durationcheck
-    - exhaustive
-    - copyloopvar
-    - gofmt
-    - goimports
-    - misspell
-    - predeclared
-    - reassign
-    - whitespace
     - containedctx
+    - contextcheck
+    - copyloopvar
+    - durationcheck
+    - errcheck
+    - exhaustive
     - funlen
     - gocognit
-    - contextcheck
+    - gocritic
+    - godot
     - importas
-    - truecloudlab-linters
-    - perfsprint
-    - testifylint
-    - protogetter
+    - ineffassign
     - intrange
-    - tenv
-  disable-all: true
-  fast: false
+    - misspell
+    - perfsprint
+    - predeclared
+    - protogetter
+    - reassign
+    - revive
+    - staticcheck
+    - testifylint
+    - truecloudlab-linters
+    - unconvert
+    - unparam
+    - unused
+    - usetesting
+    - whitespace
+  settings:
+    exhaustive:
+      default-signifies-exhaustive: true
+    funlen:
+      lines: 80
+      statements: 60
+    gocognit:
+      min-complexity: 40
+    gocritic:
+      disabled-checks:
+        - ifElseChain
+    importas:
+      alias:
+        - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+          alias: objectSDK
+      no-unaliased: true
+      no-extra-aliases: false
+    staticcheck:
+      checks:
+        - all
+        - -QF1002
+    unused:
+      field-writes-are-uses: false
+      exported-fields-are-used: false
+      local-variables-are-used: false
+    custom:
+      truecloudlab-linters:
+        path: bin/linters/external_linters.so
+        original-url: git.frostfs.info/TrueCloudLab/linters.git
+        settings:
+          noliteral:
+            constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
+            disable-packages:
+              - codes
+              - err
+              - res
+              - exec
+            target-methods:
+              - reportFlushError
+              - reportError
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gci
+    - gofmt
+    - goimports
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+      custom-order: true
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e4ba6a5d6..92c84ab16 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,30 @@ Changelog for FrostFS Node
 ### Removed
 ### Updated
 
+## [v0.44.0] - 2024-11-25 - Rongbuk
+
+### Added
+- Allow to prioritize nodes during GET traversal via attributes (#1439)
+- Add metrics for the frostfsid cache (#1464)
+- Customize constant attributes attached to every tracing span (#1488)
+- Manage additional keys in the `frostfsid` contract (#1505)
+- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519)
+
+### Changed
+- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396)
+- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515)
+
+### Fixed
+- Fix EC object search (#1408)
+- Fix EC object put when one of the nodes is unavailable (#1427)
+
+### Removed
+- Drop most of the eACL-related code (#1425)
+- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483)
+
+### Upgrading from v0.43.0
+The metabase schema has changed completely; a resync is required.
+
 ## [v0.42.0]
 
 ### Added
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 000000000..d19c96a5c
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,3 @@
+.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
+.forgejo/.* @potyarkin
+Makefile @potyarkin
diff --git a/Makefile b/Makefile
index 68a31febe..575eaae6f 100755
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,6 @@
 #!/usr/bin/make -f
 SHELL = bash
+.SHELLFLAGS = -euo pipefail -c
 
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -7,16 +8,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
-GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.61.0
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
+GO_VERSION ?= 1.23
+LINT_VERSION ?= 2.0.2
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
 PROTOC_OS_VERSION=osx-x86_64
 ifeq ($(shell uname), Linux)
   PROTOC_OS_VERSION=linux-x86_64
 endif
-STATICCHECK_VERSION ?= 2024.1.1
+STATICCHECK_VERSION ?= 2025.1.1
 ARCH = amd64
 
 BIN = bin
@@ -42,7 +43,7 @@ GOFUMPT_VERSION ?= v0.7.0
 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
 GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
 
-GOPLS_VERSION ?= v0.15.1
+GOPLS_VERSION ?= v0.17.1
 GOPLS_DIR ?= $(abspath $(BIN))/gopls
 GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
 GOPLS_TEMP_FILE := $(shell mktemp)
@@ -115,7 +116,7 @@ protoc:
 # Install protoc
 protoc-install:
 	@rm -rf $(PROTOBUF_DIR)
-	@mkdir $(PROTOBUF_DIR)
+	@mkdir -p $(PROTOBUF_DIR)
 	@echo "⇒ Installing protoc... "
" @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip' @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @@ -139,6 +140,15 @@ images: image-storage image-ir image-cli image-adm # Build dirty local Docker images dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm +# Push FrostFS components' docker image to the registry +push-image-%: + @echo "⇒ Publish FrostFS $* docker image " + @docker push $(HUB_IMAGE)-$*:$(HUB_TAG) + +# Push all Docker images to the registry +.PHONY: push-images +push-images: push-image-storage push-image-ir push-image-cli push-image-adm + # Run `make %` in Golang container docker/%: docker run --rm -t \ @@ -160,7 +170,7 @@ imports: # Install gofumpt fumpt-install: @rm -rf $(GOFUMPT_DIR) - @mkdir $(GOFUMPT_DIR) + @mkdir -p $(GOFUMPT_DIR) @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) # Run gofumpt @@ -177,21 +187,44 @@ test: @echo "⇒ Running go test" @GOFLAGS="$(GOFLAGS)" go test ./... +# Install Gerrit commit-msg hook +review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks +review-install: + @git config remote.review.url \ + || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node + @mkdir -p $(GIT_HOOK_DIR)/ + @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg + @chmod +x $(GIT_HOOK_DIR)/commit-msg + @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg + @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg + +# Create a PR in Gerrit +review: BRANCH ?= master +review: + @git push review HEAD:refs/for/$(BRANCH) \ + --push-option r=e.stratonikov@yadro.com \ + --push-option r=d.stepanov@yadro.com \ + --push-option r=an.nikiforov@yadro.com \ + --push-option r=a.arifullin@yadro.com \ + --push-option r=ekaterina.lebedeva@yadro.com \ + --push-option r=a.savchuk@yadro.com \ + --push-option r=a.chuprov@yadro.com + # Run pre-commit pre-commit-run: @pre-commit run -a --hook-stage manual # Install linters -lint-install: +lint-install: $(BIN) @rm -rf $(OUTPUT_LINT_DIR) - @mkdir $(OUTPUT_LINT_DIR) + @mkdir -p $(OUTPUT_LINT_DIR) @mkdir -p $(TMP_DIR) @rm -rf $(TMP_DIR)/linters @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: @@ -203,7 +236,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir $(STATICCHECK_DIR) + @mkdir -p $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -216,7 +249,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir $(GOPLS_DIR) + @mkdir -p $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls @@ -270,10 +303,12 @@ env-up: all echo "Frostfs contracts not found"; exit 1; \ fi ${BIN}/frostfs-adm --config 
 	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
-	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
-	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
-	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
-	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
+		--storage-wallet ./dev/storage/wallet01.json \
+		--storage-wallet ./dev/storage/wallet02.json \
+		--storage-wallet ./dev/storage/wallet03.json \
+		--storage-wallet ./dev/storage/wallet04.json
+
 	@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
 		make locode-download; \
 	fi
@@ -282,7 +317,6 @@ env-up: all
 
 # Shutdown dev environment
 env-down:
-	docker compose -f dev/docker-compose.yml down
-	docker volume rm -f frostfs-node_neo-go
+	docker compose -f dev/docker-compose.yml down -v
 	rm -rf ./$(TMP_DIR)/state
 	rm -rf ./$(TMP_DIR)/storage
diff --git a/README.md b/README.md
index 47d812b18..0109ed0e5 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
@@ -98,7 +98,7 @@ See `frostfs-contract`'s README.md for build instructions.
4. To create a container and put an object into it, run (container and object IDs will be different):
```
-./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --basic-acl public-read-write --await
+./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await
Enter password > <- press ENTER, there is no password for the wallet
CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
diff --git a/VERSION b/VERSION
index 01efe7f3a..9052dab96 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v0.42.0
+v0.44.0
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index 81395edb0..f194e97f5 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -16,10 +16,18 @@ const (
EndpointFlagDesc = "N3 RPC node endpoint"
EndpointFlagShort = "r"
+ WalletPath = "wallet"
+ WalletPathShorthand = "w"
+ WalletPathUsage = "Path to the wallet"
+
AlphabetWalletsFlag = "alphabet-wallets"
AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
+ AdminWalletPath = "wallet-admin"
+ AdminWalletUsage = "Path to the admin wallet"
+
LocalDumpFlag = "local-dump"
+ ProtoConfigPath = "protocol"
ContractsInitFlag = "contracts"
ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)"
ContractsURLFlag = "contracts-url"
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go
new file mode 100644
index 000000000..d67b70d2a
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/root.go
@@ -0,0 +1,15 @@
+package maintenance
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
+ "github.com/spf13/cobra"
+)
+
+var RootCmd = &cobra.Command{
+ Use: "maintenance",
+ Short: "Section for maintenance commands",
+}
+
+func init() {
+ RootCmd.AddCommand(zombie.Cmd)
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
new file mode 100644
index 000000000..1b66889aa
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
@@ -0,0 +1,70 @@
+package zombie
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+ "os"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/nspcc-dev/neo-go/cli/flags"
+ "github.com/nspcc-dev/neo-go/cli/input"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
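+// getPrivateKey resolves the signing key: when the --wallet flag is not
+// set, the node key from the storage config is used; otherwise the file
+// is tried first as a raw binary key and then as a NEP-6 wallet.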
+func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
+ keyDesc := viper.GetString(walletFlag)
+ if keyDesc == "" {
+ return &nodeconfig.Key(appCfg).PrivateKey
+ }
+ data, err := os.ReadFile(keyDesc)
+ commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
+
+ priv, err := keys.NewPrivateKeyFromBytes(data)
+ if err != nil {
+ w, err := wallet.NewWalletFromFile(keyDesc)
+ commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
+ return fromWallet(cmd, w, viper.GetString(addressFlag))
+ }
+ return &priv.PrivateKey
+}
+
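+// fromWallet decrypts the account for the given address (or the wallet's
+// change address when none is specified) and returns its private key.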
+func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
+ var (
+ addr util.Uint160
+ err error
+ )
+
+ if addrStr == "" {
+ addr = w.GetChangeAddress()
+ } else {
+ addr, err = flags.ParseAddress(addrStr)
+ commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
+ }
+
+ acc := w.GetAccount(addr)
+ if acc == nil {
+ commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
+ }
+
+ pass, err := getPassword()
+ commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
+
+ commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
+
+ return &acc.PrivateKey().PrivateKey
+}
+
+func getPassword() (string, error) {
+ // this check allows empty passwords
+ if viper.IsSet("password") {
+ return viper.GetString("password"), nil
+ }
+
+ return input.ReadPassword("Enter password > ")
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
new file mode 100644
index 000000000..f73f33db9
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
@@ -0,0 +1,31 @@
+package zombie
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
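+// list prints the addresses of all quarantined objects, optionally
+// filtered by the container ID passed via --cid.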
+func list(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ storageEngine := newEngine(cmd, appCfg)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+ var containerID *cid.ID
+ if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
+ containerID = &cid.ID{}
+ commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
+ }
+
+ commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
+ if containerID != nil && a.Container() != *containerID {
+ return nil
+ }
+ cmd.Println(a.EncodeToString())
+ return nil
+ }))
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
new file mode 100644
index 000000000..cd3a64499
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
@@ -0,0 +1,46 @@
+package zombie
+
+import (
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "github.com/spf13/cobra"
+)
+
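+// createMorphClient dials the morph RPC endpoints from the node config;
+// the container and netmap contract clients below are derived from it by
+// resolving contract hashes through NNS.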
+func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
+ addresses := morphconfig.RPCEndpoint(appCfg)
+ if len(addresses) == 0 {
+ commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
+ }
+ key := nodeconfig.Key(appCfg)
+ cli, err := client.New(cmd.Context(),
+ key,
+ client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
+ client.WithEndpoints(addresses...),
+ client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
+ )
+ commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
+ return cli
+}
+
+func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
+ hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
+ commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
+ cc, err := cntClient.NewFromMorph(morph, hs, 0)
+ commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
+ return cc
+}
+
+func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
+ hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
+ commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
+ cli, err := netmapClient.NewFromMorph(morph, hs, 0)
+ commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
+ return cli
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
new file mode 100644
index 000000000..27f83aec7
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
@@ -0,0 +1,154 @@
+package zombie
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
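+// quarantine keeps zombie objects in per-shard FSTree directories, each
+// created as a "quarantine" subdirectory under the common prefix of the
+// shard's blobstor substorage paths.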
+type quarantine struct {
+ // mtx protects current field.
+ mtx sync.Mutex
+ current int
+ trees []*fstree.FSTree
+}
+
+func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
+ var paths []string
+ for _, sh := range engineInfo.Shards {
+ var storagePaths []string
+ for _, st := range sh.BlobStorInfo.SubStorages {
+ storagePaths = append(storagePaths, st.Path)
+ }
+ if len(storagePaths) == 0 {
+ continue
+ }
+ paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
+ }
+ q, err := newQuarantine(paths)
+ commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
+ return q
+}
+
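+// commonPath returns the longest common byte prefix of the given paths.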
+func commonPath(paths []string) string {
+ if len(paths) == 0 {
+ return ""
+ }
+ if len(paths) == 1 {
+ return paths[0]
+ }
+ minLen := math.MaxInt
+ for _, p := range paths {
+ if len(p) < minLen {
+ minLen = len(p)
+ }
+ }
+
+ var sb strings.Builder
+ for i := range minLen {
+ for _, path := range paths[1:] {
+ if paths[0][i] != path[i] {
+ return sb.String()
+ }
+ }
+ sb.WriteByte(paths[0][i])
+ }
+ return sb.String()
+}
+
+func newQuarantine(paths []string) (*quarantine, error) {
+ var q quarantine
+ for i := range paths {
+ f := fstree.New(
+ fstree.WithDepth(1),
+ fstree.WithDirNameLen(1),
+ fstree.WithPath(paths[i]),
+ fstree.WithPerm(os.ModePerm),
+ )
+ if err := f.Open(mode.ComponentReadWrite); err != nil {
+ return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
+ }
+ if err := f.Init(); err != nil {
+ return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
+ }
+ q.trees = append(q.trees, f)
+ }
+ return &q, nil
+}
+
+func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
+ for i := range q.trees {
+ res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
+ if err != nil {
+ continue
+ }
+ return res.Object, nil
+ }
+ return nil, &apistatus.ObjectNotFound{}
+}
+
+func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
+ for i := range q.trees {
+ _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
+ if err != nil {
+ continue
+ }
+ return nil
+ }
+ return &apistatus.ObjectNotFound{}
+}
+
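+// Put marshals the object once and stores it, together with its raw
+// data, into the next tree in round-robin order.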
+func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
+ data, err := obj.Marshal()
+ if err != nil {
+ return err
+ }
+
+ var prm common.PutPrm
+ prm.Address = objectcore.AddressOf(obj)
+ prm.Object = obj
+ prm.RawData = data
+
+ q.mtx.Lock()
+ current := q.current
+ q.current = (q.current + 1) % len(q.trees)
+ q.mtx.Unlock()
+
+ _, err = q.trees[current].Put(ctx, prm)
+ return err
+}
+
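+// Iterate visits the address of every quarantined object in every tree,
+// checking for context cancellation before each tree.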
+func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
+ var prm common.IteratePrm
+ prm.Handler = func(elem common.IterationElement) error {
+ return f(elem.Address)
+ }
+ for i := range q.trees {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ _, err := q.trees[i].Iterate(ctx, prm)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
new file mode 100644
index 000000000..0b8f2f172
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
@@ -0,0 +1,55 @@
+package zombie
+
+import (
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
+func remove(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ storageEngine := newEngine(cmd, appCfg)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+
+ var containerID cid.ID
+ cidStr, _ := cmd.Flags().GetString(cidFlag)
+ commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
+
+ var objectID *oid.ID
+ oidStr, _ := cmd.Flags().GetString(oidFlag)
+ if oidStr != "" {
+ objectID = &oid.ID{}
+ commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
+ }
+
+ if objectID != nil {
+ var addr oid.Address
+ addr.SetContainer(containerID)
+ addr.SetObject(*objectID)
+ removeObject(cmd, q, addr)
+ } else {
+ commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
+ if addr.Container() != containerID {
+ return nil
+ }
+ removeObject(cmd, q, addr)
+ return nil
+ }))
+ }
+}
+
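+// removeObject deletes a single object from quarantine; a missing object
+// is not treated as an error.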
+func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
+ err := q.Delete(cmd.Context(), addr)
+ if errors.Is(err, new(apistatus.ObjectNotFound)) {
+ return
+ }
+ commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
new file mode 100644
index 000000000..f179c7c2d
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
@@ -0,0 +1,69 @@
+package zombie
+
+import (
+ "crypto/sha256"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
+func restore(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ storageEngine := newEngine(cmd, appCfg)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+ morphClient := createMorphClient(cmd, appCfg)
+ cnrCli := createContainerClient(cmd, morphClient)
+
+ var containerID cid.ID
+ cidStr, _ := cmd.Flags().GetString(cidFlag)
+ commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
+
+ var objectID *oid.ID
+ oidStr, _ := cmd.Flags().GetString(oidFlag)
+ if oidStr != "" {
+ objectID = &oid.ID{}
+ commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
+ }
+
+ if objectID != nil {
+ var addr oid.Address
+ addr.SetContainer(containerID)
+ addr.SetObject(*objectID)
+ restoreObject(cmd, storageEngine, q, addr, cnrCli)
+ } else {
+ commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
+ if addr.Container() != containerID {
+ return nil
+ }
+ restoreObject(cmd, storageEngine, q, addr, cnrCli)
+ return nil
+ }))
+ }
+}
+
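+// restoreObject puts an object back into the storage engine, reading its
+// container from morph to decide whether it is indexed, and then drops
+// the object from quarantine.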
+func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
+ obj, err := q.Get(cmd.Context(), addr)
+ commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
+ rawCID := make([]byte, sha256.Size)
+
+ cid := addr.Container()
+ cid.Encode(rawCID)
+ cnr, err := cnrCli.Get(cmd.Context(), rawCID)
+ commonCmd.ExitOnErr(cmd, "get container: %w", err)
+
+ putPrm := engine.PutPrm{
+ Object: obj,
+ IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
+ }
+ commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
+ commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
new file mode 100644
index 000000000..c8fd9e5e5
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
@@ -0,0 +1,123 @@
+package zombie
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ flagBatchSize = "batch-size"
+ flagBatchSizeUsage = "Objects iteration batch size"
+ cidFlag = "cid"
+ cidFlagUsage = "Container ID"
+ oidFlag = "oid"
+ oidFlagUsage = "Object ID"
+ walletFlag = "wallet"
+ walletFlagShorthand = "w"
+ walletFlagUsage = "Path to the wallet or binary key"
+ addressFlag = "address"
+ addressFlagUsage = "Address of wallet account"
+ moveFlag = "move"
+ moveFlagUsage = "Move objects from storage engine to quarantine"
+)
+
+var (
+ Cmd = &cobra.Command{
+ Use: "zombie",
+ Short: "Zombie objects related commands",
+ }
+ scanCmd = &cobra.Command{
+ Use: "scan",
+ Short: "Scan storage engine for zombie objects and move them to quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
+ _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
+ _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
+ _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
+ },
+ Run: scan,
+ }
+ listCmd = &cobra.Command{
+ Use: "list",
+ Short: "List zombie objects from quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
+ },
+ Run: list,
+ }
+ restoreCmd = &cobra.Command{
+ Use: "restore",
+ Short: "Restore zombie objects from quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
+ _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
+ },
+ Run: restore,
+ }
+ removeCmd = &cobra.Command{
+ Use: "remove",
+ Short: "Remove zombie objects from quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
+ _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
+ },
+ Run: remove,
+ }
+)
+
+func init() {
+ initScanCmd()
+ initListCmd()
+ initRestoreCmd()
+ initRemoveCmd()
+}
+
+func initScanCmd() {
+ Cmd.AddCommand(scanCmd)
+
+ scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
+ scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
+ scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
+ scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
+}
+
+func initListCmd() {
+ Cmd.AddCommand(listCmd)
+
+ listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ listCmd.Flags().String(cidFlag, "", cidFlagUsage)
+}
+
+func initRestoreCmd() {
+ Cmd.AddCommand(restoreCmd)
+
+ restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
+ restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
+}
+
+func initRemoveCmd() {
+ Cmd.AddCommand(removeCmd)
+
+ removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
+ removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
new file mode 100644
index 000000000..268ec4911
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
@@ -0,0 +1,281 @@
+package zombie
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
+ clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+ "golang.org/x/sync/errgroup"
+)
+
+func scan(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
+ if batchSize == 0 {
+ commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
+ }
+ move, _ := cmd.Flags().GetBool(moveFlag)
+
+ storageEngine := newEngine(cmd, appCfg)
+ morphClient := createMorphClient(cmd, appCfg)
+ cnrCli := createContainerClient(cmd, morphClient)
+ nmCli := createNetmapClient(cmd, morphClient)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+ pk := getPrivateKey(cmd, appCfg)
+
+ epoch, err := nmCli.Epoch(cmd.Context())
+ commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
+
+ nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
+ commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
+
+ cmd.Printf("Epoch: %d\n", nm.Epoch())
+ cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
+
+ ps := &processStatus{
+ statusCount: make(map[status]uint64),
+ }
+
+ stopCh := make(chan struct{})
+ start := time.Now()
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ tick := time.NewTicker(time.Second)
+ defer tick.Stop()
+ for {
+ select {
+ case <-cmd.Context().Done():
+ return
+ case <-stopCh:
+ return
+ case <-tick.C:
+ fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
+ }
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
+ close(stopCh)
+ }()
+ wg.Wait()
+ commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
+
+ cmd.Println()
+ cmd.Println("Status description:")
+ cmd.Println("undefined -- nothing is clear")
+ cmd.Println("found -- object is found in cluster")
+ cmd.Println("quarantine -- object is not found in cluster")
+ cmd.Println()
+ for status, count := range ps.statusCount {
+ cmd.Printf("Status: %s, Count: %d\n", status, count)
+ }
+}
+
+type status string
+
+const (
+ statusUndefined status = "undefined"
+ statusFound status = "found"
+ statusQuarantine status = "quarantine"
+)
+
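+// checkAddr reports whether an object is reachable in the cluster: it
+// HEAD-requests the object from its container nodes (falling back to the
+// whole netmap if placement cannot be built) and resolves EC chunks via
+// their parent ID. A policy with a single replica of a single object is
+// always treated as found.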
+func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
+ rawCID := make([]byte, sha256.Size)
+ cid := obj.Address.Container()
+ cid.Encode(rawCID)
+
+ cnr, err := cnrCli.Get(ctx, rawCID)
+ if err != nil {
+ var errContainerNotFound *apistatus.ContainerNotFound
+ if errors.As(err, &errContainerNotFound) {
+ // Policer will deal with this object.
+ return statusFound, nil
+ }
+ return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
+ }
+ nm, err := nmCli.NetMap(ctx)
+ if err != nil {
+ return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
+ }
+
+ nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
+ if err != nil {
+ // Not enough nodes, check all netmap nodes.
+ nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
+ }
+
+ objID := obj.Address.Object()
+ cnrID := obj.Address.Container()
+ local := true
+ raw := false
+ if obj.ECInfo != nil {
+ objID = obj.ECInfo.ParentID
+ local = false
+ raw = true
+ }
+ prm := clientSDK.PrmObjectHead{
+ ObjectID: &objID,
+ ContainerID: &cnrID,
+ Local: local,
+ Raw: raw,
+ }
+
+ var ni clientCore.NodeInfo
+ for i := range nodes {
+ for j := range nodes[i] {
+ if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
+ return statusUndefined, fmt.Errorf("parse node info: %w", err)
+ }
+ c, err := cc.Get(ni)
+ if err != nil {
+ continue
+ }
+ res, err := c.ObjectHead(ctx, prm)
+ if err != nil {
+ var errECInfo *objectSDK.ECInfoError
+ if raw && errors.As(err, &errECInfo) {
+ return statusFound, nil
+ }
+ continue
+ }
+ if err := apistatus.ErrFromStatus(res.Status()); err != nil {
+ continue
+ }
+ return statusFound, nil
+ }
+ }
+
+ if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
+ return statusFound, nil
+ }
+ return statusQuarantine, nil
+}
+
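+// scanStorageEngine walks the engine with a cursor in batches of
+// batchSize, checks each address concurrently and, when --move is set,
+// transfers unreachable objects to quarantine; otherwise it only prints
+// their addresses.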
+func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
+ appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
+) error {
+ cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
+ DialTimeout: apiclientconfig.DialTimeout(appCfg),
+ StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
+ ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
+ Key: pk,
+ AllowExternal: apiclientconfig.AllowExternal(appCfg),
+ })
+ ctx := cmd.Context()
+
+ var cursor *engine.Cursor
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var prm engine.ListWithCursorPrm
+ prm.WithCursor(cursor)
+ prm.WithCount(batchSize)
+
+ res, err := storageEngine.ListWithCursor(ctx, prm)
+ if err != nil {
+ if errors.Is(err, engine.ErrEndOfListing) {
+ return nil
+ }
+ return fmt.Errorf("list with cursor: %w", err)
+ }
+
+ cursor = res.Cursor()
+ addrList := res.AddressList()
+ eg, egCtx := errgroup.WithContext(ctx)
+ eg.SetLimit(int(batchSize))
+
+ for i := range addrList {
+ addr := addrList[i]
+ eg.Go(func() error {
+ result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
+ if err != nil {
+ return fmt.Errorf("check object %s status: %w", addr.Address, err)
+ }
+ ps.add(result)
+
+ if !move && result == statusQuarantine {
+ cmd.Println(addr)
+ return nil
+ }
+
+ if result == statusQuarantine {
+ return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return fmt.Errorf("process objects batch: %w", err)
+ }
+ }
+}
+
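+// moveToQuarantine reads the object from the engine, copies it into
+// quarantine and only then force-removes it from the engine.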
+func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
+ var getPrm engine.GetPrm
+ getPrm.WithAddress(addr)
+ res, err := storageEngine.Get(ctx, getPrm)
+ if err != nil {
+ return fmt.Errorf("get object %s from storage engine: %w", addr, err)
+ }
+
+ if err := q.Put(ctx, res.Object()); err != nil {
+ return fmt.Errorf("put object %s to quarantine: %w", addr, err)
+ }
+
+ var delPrm engine.DeletePrm
+ delPrm.WithForceRemoval()
+ delPrm.WithAddress(addr)
+
+ if err = storageEngine.Delete(ctx, delPrm); err != nil {
+ return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
+ }
+ return nil
+}
+
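+// processStatus counts scanned objects per resulting status under a
+// mutex, so the progress goroutine can read the total concurrently.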
+type processStatus struct {
+ guard sync.RWMutex
+ statusCount map[status]uint64
+ count uint64
+}
+
+func (s *processStatus) add(st status) {
+ s.guard.Lock()
+ defer s.guard.Unlock()
+ s.statusCount[st]++
+ s.count++
+}
+
+func (s *processStatus) total() uint64 {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+ return s.count
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
new file mode 100644
index 000000000..5be34d502
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
@@ -0,0 +1,201 @@
+package zombie
+
+import (
+ "context"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
+ shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
+ fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/panjf2000/ants/v2"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
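+// newEngine assembles a read-write storage engine from the regular node
+// configuration, with logging and QoS limiting disabled, so that the
+// zombie subcommands can operate on the node's shards directly.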
+func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
+ ngOpts := storageEngineOptions(c)
+ shardOpts := shardOptions(cmd, c)
+ e := engine.New(ngOpts...)
+ for _, opts := range shardOpts {
+ _, err := e.AddShard(cmd.Context(), opts...)
+ commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
+ }
+ commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
+ commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
+ return e
+}
+
+func storageEngineOptions(c *config.Config) []engine.Option {
+ return []engine.Option{
+ engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
+ engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
+ }
+}
+
+func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
+ var result [][]shard.Option
+ err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
+ result = append(result, getShardOpts(cmd, c, sh))
+ return nil
+ })
+ commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
+ return result
+}
+
+func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
+ wc, wcEnabled := getWriteCacheOpts(sh)
+ return []shard.Option{
+ shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ shard.WithRefillMetabase(sh.RefillMetabase()),
+ shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
+ shard.WithMode(sh.Mode()),
+ shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
+ shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
+ shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
+ shard.WithWriteCache(wcEnabled),
+ shard.WithWriteCacheOptions(wc),
+ shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
+ shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
+ shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
+ shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
+ return pool
+ }),
+ shard.WithLimiter(qos.NewNoopLimiter()),
+ }
+}
+
+func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
+ if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
+ var result []writecache.Option
+ result = append(result,
+ writecache.WithPath(wc.Path()),
+ writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
+ writecache.WithMaxObjectSize(wc.MaxObjectSize()),
+ writecache.WithFlushWorkersCount(wc.WorkerCount()),
+ writecache.WithMaxCacheSize(wc.SizeLimit()),
+ writecache.WithMaxCacheCount(wc.CountLimit()),
+ writecache.WithNoSync(wc.NoSync()),
+ writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ writecache.WithQoSLimiter(qos.NewNoopLimiter()),
+ )
+ return result, true
+ }
+ return nil, false
+}
+
+func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
+ var piloramaOpts []pilorama.Option
+ if config.BoolSafe(c.Sub("tree"), "enabled") {
+ pr := sh.Pilorama()
+ piloramaOpts = append(piloramaOpts,
+ pilorama.WithPath(pr.Path()),
+ pilorama.WithPerm(pr.Perm()),
+ pilorama.WithNoSync(pr.NoSync()),
+ pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
+ pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
+ )
+ }
+ return piloramaOpts
+}
+
+func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
+ return []meta.Option{
+ meta.WithPath(sh.Metabase().Path()),
+ meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
+ meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
+ meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
+ meta.WithBoltDBOptions(&bbolt.Options{
+ Timeout: 100 * time.Millisecond,
+ }),
+ meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ meta.WithEpochState(&epochState{}),
+ }
+}
+
+func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
+ result := []blobstor.Option{
+ blobstor.WithCompression(sh.Compression()),
+ blobstor.WithStorages(getSubStorages(ctx, sh)),
+ blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ }
+
+ return result
+}
+
+func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
+ var ss []blobstor.SubStorage
+ for _, storage := range sh.BlobStor().Storages() {
+ switch storage.Type() {
+ case blobovniczatree.Type:
+ sub := blobovniczaconfig.From((*config.Config)(storage))
+ blobTreeOpts := []blobovniczatree.Option{
+ blobovniczatree.WithRootPath(storage.Path()),
+ blobovniczatree.WithPermissions(storage.Perm()),
+ blobovniczatree.WithBlobovniczaSize(sub.Size()),
+ blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
+ blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
+ blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
+ blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
+ blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
+ blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
+ blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
+ blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
+ }
+
+ ss = append(ss, blobstor.SubStorage{
+ Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
+ return uint64(len(data)) < sh.SmallSizeLimit()
+ },
+ })
+ case fstree.Type:
+ sub := fstreeconfig.From((*config.Config)(storage))
+ fstreeOpts := []fstree.Option{
+ fstree.WithPath(storage.Path()),
+ fstree.WithPerm(storage.Perm()),
+ fstree.WithDepth(sub.Depth()),
+ fstree.WithNoSync(sub.NoSync()),
+ fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ }
+
+ ss = append(ss, blobstor.SubStorage{
+ Storage: fstree.New(fstreeOpts...),
+ Policy: func(_ *objectSDK.Object, _ []byte) bool {
+ return true
+ },
+ })
+ default:
+ // should never happen: unknown storage types
+ // are rejected when the config is read
+ }
+ }
+ return ss
+}
+
+type epochState struct{}
+
+func (epochState) CurrentEpoch() uint64 {
+ return 0
+}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index 00b30c9b2..c0c290c5e 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -28,6 +28,7 @@ const (
var (
errNoPathsFound = errors.New("no metabase paths found")
errNoMorphEndpointsFound = errors.New("no morph endpoints found")
+ errUpgradeFailed = errors.New("upgrade failed")
)
var UpgradeCmd = &cobra.Command{
@@ -91,14 +92,19 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil {
return err
}
+ allSuccess := true
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
+ allSuccess = false
}
}
- return nil
+ if allSuccess {
+ return nil
+ }
+ return errUpgradeFailed
}
func getMetabasePaths(appCfg *config.Config) ([]string, error) {
@@ -135,7 +141,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
if err != nil {
return nil, fmt.Errorf("resolve container contract hash: %w", err)
}
- cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
+ cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
if err != nil {
return nil, fmt.Errorf("create morph container client: %w", err)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
index 077e03737..1960faab4 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
@@ -5,35 +5,19 @@ import (
"encoding/json"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
- jsonFlag = "json"
- jsonFlagDesc = "Output rule chains in JSON format"
- chainIDFlag = "chain-id"
- chainIDDesc = "Rule chain ID"
- ruleFlag = "rule"
- ruleFlagDesc = "Rule chain in text format"
- pathFlag = "path"
- pathFlagDesc = "path to encoded chain in JSON or binary format"
- targetNameFlag = "target-name"
- targetNameDesc = "Resource name in APE resource name format"
- targetTypeFlag = "target-type"
- targetTypeDesc = "Resource type(container/namespace)"
- addrAdminFlag = "addr"
- addrAdminDesc = "The address of the admins wallet"
- chainNameFlag = "chain-name"
- chainNameFlagDesc = "Chain name(ingress|s3)"
+ jsonFlag = "json"
+ jsonFlagDesc = "Output rule chains in JSON format"
+ addrAdminFlag = "addr"
+ addrAdminDesc = "The address of the admins wallet"
)
var (
@@ -101,17 +85,17 @@ func initAddRuleChainCmd() {
addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- addRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
- _ = addRuleChainCmd.MarkFlagRequired(targetTypeFlag)
- addRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
- _ = addRuleChainCmd.MarkFlagRequired(targetNameFlag)
+ addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+	addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
- addRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
- _ = addRuleChainCmd.MarkFlagRequired(chainIDFlag)
- addRuleChainCmd.Flags().StringArray(ruleFlag, []string{}, ruleFlagDesc)
- addRuleChainCmd.Flags().String(pathFlag, "", pathFlagDesc)
- addRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
- addRuleChainCmd.MarkFlagsMutuallyExclusive(ruleFlag, pathFlag)
+ addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
+ addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
+ addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
+ addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
+ addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag)
}
func initRemoveRuleChainCmd() {
@@ -120,26 +104,25 @@ func initRemoveRuleChainCmd() {
removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- removeRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(targetTypeFlag)
- removeRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(targetNameFlag)
- removeRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
- removeRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
+ removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
+ removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
+ removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target")
- removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, chainIDFlag)
+ removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag)
}
func initListRuleChainsCmd() {
Cmd.AddCommand(listRuleChainsCmd)
listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listRuleChainsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
- _ = listRuleChainsCmd.MarkFlagRequired(targetTypeFlag)
- listRuleChainsCmd.Flags().String(targetNameFlag, "", targetNameDesc)
- _ = listRuleChainsCmd.MarkFlagRequired(targetNameFlag)
+ listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
+ _ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc)
- listRuleChainsCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
+ listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
}
func initSetAdminCmd() {
@@ -161,15 +144,15 @@ func initListTargetsCmd() {
Cmd.AddCommand(listTargetsCmd)
listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listTargetsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
- _ = listTargetsCmd.MarkFlagRequired(targetTypeFlag)
+ listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
+ _ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
}
func addRuleChain(cmd *cobra.Command, _ []string) {
- chain := parseChain(cmd)
+ chain := apeCmd.ParseChain(cmd)
target := parseTarget(cmd)
pci, ac := newPolicyContractInterface(cmd)
- h, vub, err := pci.AddMorphRuleChain(parseChainName(cmd), target, chain)
+ h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err)
@@ -181,14 +164,14 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
pci, ac := newPolicyContractInterface(cmd)
removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag)
if removeAll {
- h, vub, err := pci.RemoveMorphRuleChainsByTarget(parseChainName(cmd), target)
+ h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
cmd.Println("All chains for target removed successfully")
} else {
- chainID := parseChainID(cmd)
- h, vub, err := pci.RemoveMorphRuleChain(parseChainName(cmd), target, chainID)
+ chainID := apeCmd.ParseChainID(cmd)
+ h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
@@ -199,7 +182,7 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
func listRuleChains(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
pci, _ := newPolicyContractReaderInterface(cmd)
- chains, err := pci.ListMorphRuleChains(parseChainName(cmd), target)
+ chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target)
commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err)
if len(chains) == 0 {
return
@@ -210,14 +193,14 @@ func listRuleChains(cmd *cobra.Command, _ []string) {
prettyJSONFormat(cmd, chains)
} else {
for _, c := range chains {
- parseutil.PrintHumanReadableAPEChain(cmd, c)
+ apeCmd.PrintHumanReadableAPEChain(cmd, c)
}
}
}
func setAdmin(cmd *cobra.Command, _ []string) {
s, _ := cmd.Flags().GetString(addrAdminFlag)
- addr, err := util.Uint160DecodeStringLE(s)
+ addr, err := address.StringToUint160(s)
commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
pci, ac := newPolicyContractInterface(cmd)
h, vub, err := pci.SetAdmin(addr)
@@ -231,12 +214,11 @@ func getAdmin(cmd *cobra.Command, _ []string) {
pci, _ := newPolicyContractReaderInterface(cmd)
addr, err := pci.GetAdmin()
commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
- cmd.Println(addr.StringLE())
+ cmd.Println(address.Uint160ToString(addr))
}
func listTargets(cmd *cobra.Command, _ []string) {
- typ, err := parseTargetType(cmd)
- commonCmd.ExitOnErr(cmd, "parse target type error: %w", err)
+ typ := apeCmd.ParseTargetType(cmd)
pci, inv := newPolicyContractReaderInterface(cmd)
sid, it, err := pci.ListTargetsIterator(typ)
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
index d4aedda2e..3c332c3f0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
@@ -2,13 +2,14 @@ package ape
import (
"errors"
- "strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -18,90 +19,29 @@ import (
"github.com/spf13/viper"
)
-const (
- ingress = "ingress"
- s3 = "s3"
-)
-
-var mChainName = map[string]apechain.Name{
- ingress: apechain.Ingress,
- s3: apechain.S3,
-}
-
-var (
- errUnknownTargetType = errors.New("unknown target type")
- errChainIDCannotBeEmpty = errors.New("chain id cannot be empty")
- errRuleIsNotParsed = errors.New("rule is not passed")
- errUnsupportedChainName = errors.New("unsupported chain name")
-)
+var errUnknownTargetType = errors.New("unknown target type")
func parseTarget(cmd *cobra.Command) policyengine.Target {
- name, _ := cmd.Flags().GetString(targetNameFlag)
- typ, err := parseTargetType(cmd)
-
- // interpret "root" namespace as empty
- if typ == policyengine.Namespace && name == "root" {
- name = ""
- }
-
- commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
-
- return policyengine.Target{
- Name: name,
- Type: typ,
- }
-}
-
-func parseTargetType(cmd *cobra.Command) (policyengine.TargetType, error) {
- typ, _ := cmd.Flags().GetString(targetTypeFlag)
+ typ := apeCmd.ParseTargetType(cmd)
+ name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag)
switch typ {
- case namespaceTarget:
- return policyengine.Namespace, nil
- case containerTarget:
- return policyengine.Container, nil
- case userTarget:
- return policyengine.User, nil
- case groupTarget:
- return policyengine.Group, nil
+ case policyengine.Namespace:
+ if name == "root" {
+ name = ""
+ }
+ return policyengine.NamespaceTarget(name)
+ case policyengine.Container:
+ var cnr cid.ID
+ commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
+ return policyengine.ContainerTarget(name)
+ case policyengine.User:
+ return policyengine.UserTarget(name)
+ case policyengine.Group:
+ return policyengine.GroupTarget(name)
+ default:
+ commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
}
- return -1, errUnknownTargetType
-}
-
-func parseChainID(cmd *cobra.Command) apechain.ID {
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
- if chainID == "" {
- commonCmd.ExitOnErr(cmd, "read chain id error: %w",
- errChainIDCannotBeEmpty)
- }
- return apechain.ID(chainID)
-}
-
-func parseChain(cmd *cobra.Command) *apechain.Chain {
- chain := new(apechain.Chain)
-
- if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
- commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules))
- } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
- commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath))
- } else {
- commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
- }
-
- chain.ID = parseChainID(cmd)
-
- cmd.Println("Parsed chain:")
- parseutil.PrintHumanReadableAPEChain(cmd, chain)
-
- return chain
-}
-
-func parseChainName(cmd *cobra.Command) apechain.Name {
- chainName, _ := cmd.Flags().GetString(chainNameFlag)
- apeChainName, ok := mChainName[strings.ToLower(chainName)]
- if !ok {
- commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
- }
- return apeChainName
+ panic("unreachable")
}
// invokerAdapter adapts invoker.Invoker to ContractStorageInvoker interface.
@@ -115,16 +55,15 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
}
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
inv := invoker.New(c, nil)
- var ch util.Uint160
r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
- ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
+ ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
invokerAdapter := &invokerAdapter{
@@ -136,10 +75,11 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
}
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
- ac, err := helper.NewLocalActor(cmd, c)
+ walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
+ ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
var ch util.Uint160
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
index 5519705d4..23dba14f4 100644
--- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
+++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -51,7 +52,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
nmHash util.Uint160
)
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return err
}
@@ -161,9 +162,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
}
- if w.Err != nil {
- panic(w.Err)
- }
+ assert.NoError(w.Err)
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
if err != nil {
@@ -226,9 +225,7 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
for i := range accounts {
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
}
- if w.Err != nil {
- panic(w.Err)
- }
+ assert.NoError(w.Err)
res, err := c.Run(w.Bytes())
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go
index 3a7f84acb..c17fb62ff 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go
@@ -26,7 +26,7 @@ import (
const forceConfigSet = "force"
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
nbuf := make([]byte, 8)
- copy(nbuf[:], v)
+ copy(nbuf, v)
n := binary.LittleEndian.Uint64(nbuf)
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return helper.InvalidConfigValueErr(k)
}
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
default:
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
}
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
index a3b4f129a..be4041a86 100644
--- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go
+++ b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
@@ -4,7 +4,6 @@ import "time"
const (
ConsensusAccountName = "consensus"
- ProtoConfigPath = "protocol"
// MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
// of the invocation script.
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index 6f08d1655..79685f111 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -76,7 +77,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err)
}
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -157,7 +158,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
}
func listContainers(cmd *cobra.Command, _ []string) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -235,9 +236,7 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
putContainer(bw, ch, cnt)
- if bw.Err != nil {
- panic(bw.Err)
- }
+ assert.NoError(bw.Err)
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
index 5adb480da..543b5fcb3 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/cli/cmdargs"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@@ -120,9 +121,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
}
- if writer.Err != nil {
- panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
- }
+ assert.NoError(writer.Err, "can't create deployment script")
if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
return err
@@ -173,9 +172,8 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
}
- if bw.Err != nil {
- panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
- } else if bw.Len() != start {
+ assert.NoError(bw.Err, "can't create deployment script")
+ if bw.Len() != start {
writer.WriteBytes(bw.Bytes())
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
index be2134b77..fde58fd2b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
@@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -36,7 +37,7 @@ type contractDumpInfo struct {
}
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -219,8 +220,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
- _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
- info.name, info.version, info.hash.StringLE())))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
+ info.name, info.version, info.hash.StringLE()))
}
_ = tw.Flush()
@@ -236,21 +237,17 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
- if sub.Err != nil {
- panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
- }
+ assert.NoError(sub.Err, "can't create version script")
script := sub.Bytes()
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
- bw.BinWriter.WriteBytes(script)
+ bw.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
- if bw.Err != nil {
- panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
- }
+ assert.NoError(bw.Err, "can't create version script")
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
new file mode 100644
index 000000000..4046e85e3
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
@@ -0,0 +1,83 @@
+package frostfsid
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+var (
+ frostfsidAddSubjectKeyCmd = &cobra.Command{
+ Use: "add-subject-key",
+ Short: "Add a public key to the subject in frostfsid contract",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ },
+ Run: frostfsidAddSubjectKey,
+ }
+ frostfsidRemoveSubjectKeyCmd = &cobra.Command{
+ Use: "remove-subject-key",
+ Short: "Remove a public key from the subject in frostfsid contract",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ },
+ Run: frostfsidRemoveSubjectKey,
+ }
+)
+
+func initFrostfsIDAddSubjectKeyCmd() {
+ Cmd.AddCommand(frostfsidAddSubjectKeyCmd)
+
+ ff := frostfsidAddSubjectKeyCmd.Flags()
+ ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+
+ ff.String(subjectAddressFlag, "", "Subject address")
+ _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
+
+ ff.String(subjectKeyFlag, "", "Public key to add")
+ _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
+}
+
+func initFrostfsIDRemoveSubjectKeyCmd() {
+ Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd)
+
+ ff := frostfsidRemoveSubjectKeyCmd.Flags()
+ ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+
+ ff.String(subjectAddressFlag, "", "Subject address")
+	_ = frostfsidRemoveSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
+
+ ff.String(subjectKeyFlag, "", "Public key to remove")
+	_ = frostfsidRemoveSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
+}
+
+func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) {
+ addr := getFrostfsIDSubjectAddress(cmd)
+ pub := getFrostfsIDSubjectKey(cmd)
+
+ ffsid, err := newFrostfsIDClient(cmd)
+ commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
+
+ ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub))
+
+ err = ffsid.sendWait()
+ commonCmd.ExitOnErr(cmd, "add subject key: %w", err)
+}
+
+func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) {
+ addr := getFrostfsIDSubjectAddress(cmd)
+ pub := getFrostfsIDSubjectKey(cmd)
+
+ ffsid, err := newFrostfsIDClient(cmd)
+ commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
+
+ ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub))
+
+ err = ffsid.sendWait()
+ commonCmd.ExitOnErr(cmd, "remove subject key: %w", err)
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
index 091d6634a..7f777db98 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
@@ -1,6 +1,7 @@
package frostfsid
import (
+ "encoding/hex"
"errors"
"fmt"
"math/big"
@@ -34,11 +35,16 @@ const (
subjectNameFlag = "subject-name"
subjectKeyFlag = "subject-key"
subjectAddressFlag = "subject-address"
- includeNamesFlag = "include-names"
+ extendedFlag = "extended"
groupNameFlag = "group-name"
groupIDFlag = "group-id"
rootNamespacePlaceholder = "
-| `level` | `string` | `info` | Logging level. Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
+| Parameter | Type | Default value | Description |
+|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------|
+| `level` | `string` | `info` | Logging level. Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
+| `tags` | list of [tag descriptions](#tags-subsection) | | Array of tag descriptions. |
+
+## `tags` subsection
+| Parameter | Type | Default value | Description |
+|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `names` | `string` | | List of components divided by `,`. Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. |
+| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. |
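+
+A minimal example (the component names and levels here are illustrative):
+
+```yaml
+logger:
+  level: info
+  tags:
+    - names: "policer,treesvc"
+      level: debug
+```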
# `contracts` section
Contains override values for FrostFS side-chain contract hashes. Most of the time contract
@@ -147,15 +159,19 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
+ netmap:
+ candidates:
+ poll_interval: 20s
```
-| Parameter | Type | Default value | Description |
-| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
-| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls). Negative value disables caching. Cached entities: containers, container lists, eACL tables. |
-| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
-| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
-| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
+| Parameter | Type | Default value | Description |
+|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
+| `cache_ttl`                       | `duration`                                                 | Morph block time | Sidechain cache TTL value (min interval between similar calls). Negative value disables caching. Cached entities: containers, container lists, eACL tables. |
+| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
+| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
+| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
+| `netmap.candidates.poll_interval` | `duration`                                                 | `20s`            | Interval between polls that merge netmap candidates into the netmap stored in the local cache.                                                              |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@@ -169,7 +185,6 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
-| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
@@ -180,20 +195,41 @@ Contains configuration for each shard. Keys must be consecutive numbers starting
`default` subsection has the same format and specifies defaults for missing values.
The following table describes configuration for each shard.
-| Parameter | Type | Default value | Description |
-| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `compress` | `bool` | `false` | Flag to enable compression. |
-| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
-| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. |
-| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. |
-| `mode` | `string` | `read-write` | Shard Mode. Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
-| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
-| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
-| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
-| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
-| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
-| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
-| `gc` | [GC config](#gc-subsection) | | GC configuration. |
+| Parameter | Type | Default value | Description |
+| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- |
+| `compression` | [Compression config](#compression-subsection) | | Compression config. |
+| `mode`                         | `string`                                      | `read-write`  | Shard Mode. Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled`        |
+| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
+| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
+| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
+| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
+| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
+| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
+| `gc` | [GC config](#gc-subsection) | | GC configuration. |
+| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
+
+### `compression` subsection
+
+Contains compression config.
+
+```yaml
+compression:
+ enabled: true
+ level: smallest_size
+ exclude_content_types:
+ - audio/*
+ - video/*
+ estimate_compressibility: true
+ estimate_compressibility_threshold: 0.7
+```
+
+| Parameter | Type | Default value | Description |
+| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | `false` | Flag to enable compression. |
+| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. |
+| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
+| `estimate_compressibility`           | `bool`     | `false`       | If `true`, normalized compressibility estimation is used to decide whether to compress the data.                                                                                                                  |
+| `estimate_compressibility_threshold` | `float`    | `0.1`         | Normalized compressibility estimate threshold: data is compressed if the estimate is greater than this value.                                                                                                     |
### `blobstor` subsection
@@ -208,7 +244,7 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
- perm: 0644
+ perm: 0o644
size: 4194304
depth: 1
width: 4
@@ -268,7 +304,7 @@ gc:
```yaml
metabase:
path: /path/to/meta.db
- perm: 0644
+ perm: 0o644
max_batch_size: 200
max_batch_delay: 20ms
```
@@ -300,6 +336,65 @@ writecache:
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
+### `limits` subsection
+
+```yaml
+limits:
+ max_read_running_ops: 10000
+ max_read_waiting_ops: 1000
+ max_write_running_ops: 1000
+ max_write_waiting_ops: 100
+ read:
+ - tag: internal
+ weight: 20
+ limit_ops: 0
+ reserved_ops: 1000
+ - tag: client
+ weight: 70
+ reserved_ops: 10000
+ - tag: background
+ weight: 5
+ limit_ops: 10000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 5
+ limit_ops: 25000
+ - tag: policer
+ weight: 5
+ limit_ops: 25000
+ write:
+ - tag: internal
+ weight: 200
+ limit_ops: 0
+ reserved_ops: 100
+ - tag: client
+ weight: 700
+ reserved_ops: 1000
+ - tag: background
+ weight: 50
+ limit_ops: 1000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 50
+ limit_ops: 2500
+ - tag: policer
+ weight: 50
+ limit_ops: 2500
+```
+
+| Parameter | Type | Default value | Description |
+| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
+| `max_read_running_ops`  | `int`    | 0 (no limit)   | The maximum number of running read operations.                                                                    |
+| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
+| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
+| `max_write_waiting_ops` | `int`    | 0 (no limit)   | The maximum number of waiting write operations.                                                                   |
+| `read` | `[]tag` | empty | Array of shard read settings for tags. |
+| `write` | `[]tag` | empty | Array of shard write settings for tags. |
+| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
+| `tag.weight`            | `float`  | 0 (no weight)  | Weight for queries with the specified tag. Weights must be specified either for all tags or for none.            |
+| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
+| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
+| `tag.prohibited`        | `bool`   | false          | If `true`, operations with the specified tag are prohibited.                                                      |
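+
+For example, with the `read` settings above the weights sum to 105, so under contention `client` queries receive roughly 70/105 of the read throughput, while `internal` queries are additionally guaranteed 1000 reserved ops/s despite a weight of only 20.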
# `node` section
@@ -315,22 +410,22 @@ node:
- "Price:11"
- "UN-LOCODE:RU MSK"
- "key:value"
- relay: false
persistent_sessions:
path: /sessions
persistent_state:
path: /state
+ locode_db_path: "/path/to/locode/db"
```
-| Parameter | Type | Default value | Description |
-|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------|
-| `key` | `string` | | Path to the binary-encoded private key. |
-| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
-| `addresses` | `[]string` | | Addresses advertised in the netmap. |
-| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in ` bucket
if len(lst) == 0 {
- _ = bkt.Delete(item.key) // ignore error, best effort there
-
- return nil
+ return bkt.Delete(item.key)
}
// if list is not empty, then update it
encodedLst, err := encodeList(lst)
if err != nil {
- return nil // ignore error, best effort there
+ return err
}
- _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there
- return nil
+ return bkt.Put(item.key, encodedLst)
}
func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
@@ -480,35 +478,47 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
return ErrUnknownObjectType
}
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
} else {
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: parentBucketName(cnr, bucketName),
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
}
- delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
+ if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
name: smallBucketName(cnr, bucketName),
key: objKey,
- })
- delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
+ }); err != nil {
+ return err
+ }
+ if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
name: rootBucketName(cnr, bucketName),
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
if expEpoch, ok := hasExpirationEpoch(obj); ok {
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: expEpochToObjectBucketName,
key: expirationEpochKey(expEpoch, cnr, addr.Object()),
- })
- delUniqueIndexItem(tx, namedBucketItem{
+ }); err != nil {
+ return err
+ }
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
}
return nil
@@ -529,16 +539,18 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
}
}
// also drop EC parent root info if current EC chunk is the last one
if !hasAnyChunks {
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
- })
+ }); err != nil {
+ return err
+ }
}
if ech.ParentSplitParentID() == nil {
@@ -567,16 +579,15 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
}
}
// drop split info
- delUniqueIndexItem(tx, namedBucketItem{
+ return delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)),
})
- return nil
}
func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool {
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
index a25627990..884da23ff 100644
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -30,8 +30,8 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close()) }()
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
@@ -130,17 +130,9 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.Equal(t, 2, len(tombstonedObjects))
- var tombstones []oid.Address
- for _, tss := range tombstonedObjects {
- tombstones = append(tombstones, tss.tomb)
- }
- inhumePrm.SetAddresses(tombstones...)
- inhumePrm.SetGCMark()
- _, err = db.Inhume(context.Background(), inhumePrm)
+ _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
require.NoError(t, err)
- require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
-
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
@@ -194,8 +186,8 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close()) }()
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
ecChunks := make([]oid.ID, chunksCount)
@@ -374,17 +366,9 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.True(t, len(tombstonedObjects) == parentCount+chunksCount)
- var tombstones []oid.Address
- for _, tss := range tombstonedObjects {
- tombstones = append(tombstones, tss.tomb)
- }
- inhumePrm.SetAddresses(tombstones...)
- inhumePrm.SetGCMark()
- _, err = db.Inhume(context.Background(), inhumePrm)
+ _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
require.NoError(t, err)
- require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
-
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
index cdfe2a203..0329e3a73 100644
--- a/pkg/local_object_storage/metabase/delete_meta_test.go
+++ b/pkg/local_object_storage/metabase/delete_meta_test.go
@@ -23,8 +23,8 @@ func TestPutDeleteIndexAttributes(t *testing.T) {
}...)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close()) }()
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
obj1 := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index fe5f7833b..c0762a377 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -18,7 +18,7 @@ import (
func TestDB_Delete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
parent := testutil.GenerateObjectWithCID(cnr)
@@ -65,7 +65,7 @@ func TestDB_Delete(t *testing.T) {
func TestDeleteAllChildren(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -103,7 +103,7 @@ func TestDeleteAllChildren(t *testing.T) {
func TestGraveOnlyDelete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
addr := oidtest.Address()
@@ -116,7 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) {
func TestExpiredObject(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
// removing expired object should be error-free
@@ -128,7 +128,7 @@ func TestExpiredObject(t *testing.T) {
func TestDelete(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
for range 10 {
@@ -170,7 +170,7 @@ func TestDelete(t *testing.T) {
func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 2e1b1dce8..7bd6f90a6 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -19,8 +19,8 @@ import (
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- addr oid.Address
- paddr oid.Address
+ addr oid.Address
+ ecParentAddr oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -36,9 +36,9 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) {
p.addr = addr
}
-// SetParent is an Exists option to set objects parent.
-func (p *ExistsPrm) SetParent(addr oid.Address) {
- p.paddr = addr
+// SetECParent is an Exists option to set the EC parent of the object.
+func (p *ExistsPrm) SetECParent(addr oid.Address) {
+ p.ecParentAddr = addr
}
// Exists returns the fact that the object is in the metabase.
@@ -81,7 +81,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.exists, res.locked, err = db.exists(tx, prm.addr, prm.paddr, currEpoch)
+ res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch)
return err
})
@@ -89,10 +89,21 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
return res, metaerr.Wrap(err)
}
-func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpoch uint64) (bool, bool, error) {
+func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) {
var locked bool
- if !parent.Equals(oid.Address{}) {
- locked = objectLocked(tx, parent.Container(), parent.Object())
+ if !ecParent.Equals(oid.Address{}) {
+ st, err := objectStatus(tx, ecParent, currEpoch)
+ if err != nil {
+ return false, false, err
+ }
+ switch st {
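+		// objectStatus codes: 2 means covered with a tombstone, 3 means expired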
+ case 2:
+ return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
+ case 3:
+ return false, locked, ErrObjectIsExpired
+ }
+
+ locked = objectLocked(tx, ecParent.Container(), ecParent.Object())
}
// check graveyard and object expiration first
st, err := objectStatus(tx, addr, currEpoch)
@@ -142,12 +153,16 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpo
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
+ return objectStatusWithCache(nil, tx, addr, currEpoch)
+}
+
+func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
// locked object could not be removed/marked with GC/expired
- if objectLocked(tx, addr.Container(), addr.Object()) {
+ if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) {
return 0, nil
}
- expired, err := isExpired(tx, addr, currEpoch)
+ expired, err := isExpiredWithCache(bc, tx, addr, currEpoch)
if err != nil {
return 0, err
}
@@ -156,8 +171,8 @@ func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, erro
return 3, nil
}
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
+ graveyardBkt := getGraveyardBucket(bc, tx)
+ garbageBkt := getGarbageBucket(bc, tx)
addrKey := addressKey(addr, make([]byte, addressKeySize))
return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
}
@@ -217,7 +232,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
err := splitInfo.Unmarshal(rawSplitInfo)
if err != nil {
- return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
+ return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
}
return splitInfo, nil
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 1e4148eba..3045e17f1 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"errors"
"testing"
@@ -18,7 +19,7 @@ const currEpoch = 1000
func TestDB_Exists(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("no object", func(t *testing.T) {
nonExist := testutil.GenerateObject()
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
index 68144d8b1..a1351cb6f 100644
--- a/pkg/local_object_storage/metabase/expired.go
+++ b/pkg/local_object_storage/metabase/expired.go
@@ -74,9 +74,11 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
}
func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- bucketName := make([]byte, bucketKeySize)
- bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName)
- b := tx.Bucket(bucketName)
+ return isExpiredWithCache(nil, tx, addr, currEpoch)
+}
+
+func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
+ b := getExpiredBucket(bc, tx, addr.Container())
if b == nil {
return false, nil
}
diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go
index bb98745ee..495c1eee7 100644
--- a/pkg/local_object_storage/metabase/expired_test.go
+++ b/pkg/local_object_storage/metabase/expired_test.go
@@ -13,7 +13,7 @@ import (
func TestDB_SelectExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
containerID1 := cidtest.ID()
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index 776f5d27c..821810c09 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -1,7 +1,6 @@
package meta
import (
- "bytes"
"context"
"fmt"
"time"
@@ -89,8 +88,12 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
+ return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
+}
+
+func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
if checkStatus {
- st, err := objectStatus(tx, addr, currEpoch)
+ st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -110,12 +113,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
bucketName := make([]byte, bucketKeySize)
// check in primary index
- data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
- if len(data) != 0 {
- return obj, obj.Unmarshal(bytes.Clone(data))
+ if b := getPrimaryBucket(bc, tx, cnr); b != nil {
+ if data := b.Get(key); len(data) != 0 {
+ return obj, obj.Unmarshal(data)
+ }
}
- data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
+ data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
if len(data) != 0 {
return nil, getECInfoError(tx, cnr, data)
}
@@ -123,13 +127,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
// if not found then check in tombstone index
data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(bytes.Clone(data))
+ return obj, obj.Unmarshal(data)
}
// if not found then check in locker index
data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(bytes.Clone(data))
+ return obj, obj.Unmarshal(data)
}
// if not found then check if object is a virtual
@@ -187,7 +191,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
err = child.Unmarshal(data)
if err != nil {
- return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
+ return nil, fmt.Errorf("unmarshal child with parent: %w", err)
}
par := child.Parent()
@@ -216,10 +220,10 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error {
ecInfo := objectSDK.NewECInfo()
for _, key := range keys {
// check in primary index
- ojbData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
- if len(ojbData) != 0 {
+ objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
+ if len(objData) != 0 {
obj := objectSDK.New()
- if err := obj.Unmarshal(ojbData); err != nil {
+ if err := obj.Unmarshal(objData); err != nil {
return err
}
chunk := objectSDK.ECChunk{}
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index f0caaea70..98c428410 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -25,7 +25,7 @@ import (
func TestDB_Get(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw := testutil.GenerateObject()
@@ -219,7 +219,6 @@ func benchmarkGet(b *testing.B, numOfObj int) {
meta.WithMaxBatchSize(batchSize),
meta.WithMaxBatchDelay(10*time.Millisecond),
)
- defer func() { require.NoError(b, db.Close()) }()
addrs := make([]oid.Address, 0, numOfObj)
for range numOfObj {
@@ -234,6 +233,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
}
db, addrs := prepareDb(runtime.NumCPU())
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("parallel", func(b *testing.B) {
b.ReportAllocs()
@@ -253,7 +253,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
})
})
- require.NoError(b, db.Close())
+ require.NoError(b, db.Close(context.Background()))
require.NoError(b, os.RemoveAll(b.Name()))
db, addrs = prepareDb(1)
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 31f95d6ed..2f23d424c 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -176,7 +177,7 @@ type gcHandler struct {
func (g gcHandler) handleKV(k, _ []byte) error {
o, err := garbageFromKV(k)
if err != nil {
- return fmt.Errorf("could not parse garbage object: %w", err)
+ return fmt.Errorf("parse garbage object: %w", err)
}
return g.h(o)
@@ -189,7 +190,7 @@ type graveyardHandler struct {
func (g graveyardHandler) handleKV(k, v []byte) error {
o, err := graveFromKV(k, v)
if err != nil {
- return fmt.Errorf("could not parse grave: %w", err)
+ return fmt.Errorf("parse grave: %w", err)
}
return g.h(o)
@@ -239,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
func garbageFromKV(k []byte) (res GarbageObject, err error) {
err = decodeAddressFromKey(&res.addr, k)
if err != nil {
- err = fmt.Errorf("could not parse address: %w", err)
+ err = fmt.Errorf("parse address: %w", err)
}
return
@@ -255,46 +256,58 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) {
return
}
-// DropGraves deletes tombstoned objects from the
+// InhumeTombstones GC-marks tombstones and removes tombstoned-object records from the
// graveyard bucket.
//
// Returns any error appeared during deletion process.
-func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error {
+func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("DropGraves", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.DropGraves")
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones")
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
- return ErrDegradedMode
+ return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
- return ErrReadOnlyMode
+ return InhumeRes{}, ErrReadOnlyMode
}
buf := make([]byte, addressKeySize)
+ prm := InhumePrm{forceRemoval: true}
+ currEpoch := db.epochState.CurrentEpoch()
- return db.boltDB.Batch(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(graveyardBucketName)
- if bkt == nil {
- return nil
+ var res InhumeRes
+
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)}
+
+ garbageBKT := tx.Bucket(garbageBucketName)
+ graveyardBKT := tx.Bucket(graveyardBucketName)
+
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
+ if err != nil {
+ return err
}
- for _, ts := range tss {
- err := bkt.Delete(addressKey(ts.Address(), buf))
- if err != nil {
+ for i := range tss {
+ if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil {
+ return err
+ }
+ if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil {
return err
}
}
return nil
})
+ return res, err
}
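
A hedged usage sketch of the renamed call: a GC routine that used to invoke DropGraves purely for cleanup can now feed the returned counters into shard metrics. The helper name and logging are illustrative; only InhumeTombstones and the InhumeRes accessors come from this patch and its tests:

package gcsketch

import (
	"context"
	"log"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// dropCollectedGraves clears graveyard records whose tombstones were
// collected, then reports the counters InhumeTombstones now returns.
func dropCollectedGraves(ctx context.Context, db *meta.DB, tss []meta.TombstonedObject) error {
	res, err := db.InhumeTombstones(ctx, tss)
	if err != nil {
		return err
	}
	// InhumeRes carries the same counters Inhume reports.
	log.Printf("inhumed: logic=%d user=%d", res.LogicInhumed(), res.UserInhumed())
	for cnr, c := range res.InhumedByCnrID() {
		log.Printf("container %s: logic=%d", cnr.EncodeToString(), c.Logic)
	}
	return nil
}
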
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index b9c6ce28c..ebadecc04 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -7,7 +7,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -15,7 +17,7 @@ import (
func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
var counter int
var iterGravePRM meta.GraveyardIterationPrm
@@ -42,7 +44,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
func TestDB_Iterate_OffsetNotFound(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
obj1 := testutil.GenerateObject()
obj2 := testutil.GenerateObject()
@@ -113,7 +115,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
// generate and put 4 objects
@@ -202,7 +204,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
// generate and put 4 objects
@@ -303,7 +305,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
func TestDB_IterateOverGarbage_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// generate and put 4 objects
obj1 := testutil.GenerateObject()
@@ -393,9 +395,9 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
require.False(t, iWasCalled)
}
-func TestDB_DropGraves(t *testing.T) {
+func TestDB_InhumeTombstones(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
// generate and put 2 objects
@@ -410,9 +412,20 @@ func TestDB_DropGraves(t *testing.T) {
err = putBig(db, obj2)
require.NoError(t, err)
- // inhume with tombstone
- addrTombstone := oidtest.Address()
- addrTombstone.SetContainer(cnr)
+ id1, _ := obj1.ID()
+ id2, _ := obj2.ID()
+ ts := objectSDK.NewTombstone()
+ ts.SetMembers([]oid.ID{id1, id2})
+ objTs := objectSDK.New()
+ objTs.SetContainerID(cnr)
+ objTs.SetType(objectSDK.TypeTombstone)
+
+ data, _ := ts.Marshal()
+ objTs.SetPayload(data)
+ require.NoError(t, objectSDK.CalculateAndSetID(objTs))
+ require.NoError(t, putBig(db, objTs))
+
+ addrTombstone := object.AddressOf(objTs)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
@@ -435,8 +448,11 @@ func TestDB_DropGraves(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 2, counter)
- err = db.DropGraves(context.Background(), buriedTS)
+ res, err := db.InhumeTombstones(context.Background(), buriedTS)
require.NoError(t, err)
+ require.EqualValues(t, 1, res.LogicInhumed())
+ require.EqualValues(t, 0, res.UserInhumed())
+ require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID())
counter = 0
iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error {
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 12f27d330..76018fb61 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -205,7 +205,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
success = err == nil
if success {
for _, addr := range prm.target {
- storagelog.Write(db.log,
+ storagelog.Write(ctx, db.log,
storagelog.AddressField(addr),
storagelog.OpField("metabase INHUME"))
}
@@ -217,85 +217,93 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes
garbageBKT := tx.Bucket(garbageBucketName)
graveyardBKT := tx.Bucket(graveyardBucketName)
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm)
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
if err != nil {
return err
}
buf := make([]byte, addressKeySize)
for i := range prm.target {
- id := prm.target[i].Object()
- cnr := prm.target[i].Container()
-
- // prevent locked objects to be inhumed
- if !prm.forceRemoval && objectLocked(tx, cnr, id) {
- return new(apistatus.ObjectLocked)
- }
-
- var lockWasChecked bool
-
- // prevent lock objects to be inhumed
- // if `Inhume` was called not with the
- // `WithForceGCMark` option
- if !prm.forceRemoval {
- if isLockObject(tx, cnr, id) {
- return ErrLockObjectRemoval
- }
-
- lockWasChecked = true
- }
-
- obj, err := db.get(tx, prm.target[i], buf, false, true, epoch)
- targetKey := addressKey(prm.target[i], buf)
- var ecErr *objectSDK.ECInfoError
- if err == nil {
- err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
- if err != nil {
- return err
- }
- } else if errors.As(err, &ecErr) {
- err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
- if err != nil {
- return err
- }
- }
-
- if prm.tomb != nil {
- var isTomb bool
- isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
- if err != nil {
- return err
- }
-
- if isTomb {
- continue
- }
- }
-
- // consider checking if target is already in graveyard?
- err = bkt.Put(targetKey, value)
- if err != nil {
+ if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil {
return err
}
-
- if prm.lockObjectHandling {
- // do not perform lock check if
- // it was already called
- if lockWasChecked {
- // inhumed object is not of
- // the LOCK type
- continue
- }
-
- if isLockObject(tx, cnr, id) {
- res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
- }
- }
}
return db.applyInhumeResToCounters(tx, res)
}
+func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error {
+ id := addr.Object()
+ cnr := addr.Container()
+ tx := bkt.Tx()
+
+ // prevent locked objects from being inhumed
+ if !prm.forceRemoval && objectLocked(tx, cnr, id) {
+ return new(apistatus.ObjectLocked)
+ }
+
+ var lockWasChecked bool
+
+ // prevent lock objects from being inhumed
+ // unless `Inhume` was called with
+ // the `WithForceGCMark` option
+ if !prm.forceRemoval {
+ if isLockObject(tx, cnr, id) {
+ return ErrLockObjectRemoval
+ }
+
+ lockWasChecked = true
+ }
+
+ obj, err := db.get(tx, addr, buf, false, true, epoch)
+ targetKey := addressKey(addr, buf)
+ var ecErr *objectSDK.ECInfoError
+ if err == nil {
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
+ if err != nil {
+ return err
+ }
+ } else if errors.As(err, &ecErr) {
+ err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
+ if err != nil {
+ return err
+ }
+ }
+
+ if prm.tomb != nil {
+ var isTomb bool
+ isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
+ if err != nil {
+ return err
+ }
+
+ if isTomb {
+ return nil
+ }
+ }
+
+ // TODO: consider checking whether the target is already in the graveyard
+ err = bkt.Put(targetKey, value)
+ if err != nil {
+ return err
+ }
+
+ if prm.lockObjectHandling {
+ // do not perform lock check if
+ // it was already called
+ if lockWasChecked {
+ // inhumed object is not of
+ // the LOCK type
+ return nil
+ }
+
+ if isLockObject(tx, cnr, id) {
+ res.deletedLockObj = append(res.deletedLockObj, addr)
+ }
+ }
+ return nil
+}
+
func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes,
garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket,
ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte,
@@ -334,10 +342,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I
}
func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
- if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil {
+ if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil {
return err
}
- if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil {
+ if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil {
return err
}
@@ -354,7 +362,7 @@ func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
// 1. tombstone address if Inhume was called with
// a Tombstone
// 2. zeroValue if Inhume was called with a GC mark
-func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
+func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
if prm.tomb != nil {
targetBucket = graveyardBKT
tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
@@ -365,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
if data != nil {
err := targetBucket.Delete(tombKey)
if err != nil {
- return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
+ return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
}
}
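
Both Inhume and InhumeTombstones now funnel through inhumeTxSingle, so lock checks, EC handling, and counter updates stay in one place. For orientation, a sketch of the classic tombstone path as exercised by the tests below (helper name illustrative):

package inhumesketch

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// buryUnderTombstone marks objects as covered by a tombstone; shard
// counters are decremented inside the same batch via applyInhumeResToCounters.
func buryUnderTombstone(ctx context.Context, db *meta.DB, tomb oid.Address, objs ...oid.Address) (meta.InhumeRes, error) {
	var prm meta.InhumePrm
	prm.SetAddresses(objs...)
	prm.SetTombstoneAddress(tomb)
	return db.Inhume(ctx, prm)
}
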
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
index 32e412c79..180713287 100644
--- a/pkg/local_object_storage/metabase/inhume_ec_test.go
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -25,8 +25,8 @@ func TestInhumeECObject(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close()) }()
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 277316f7b..786d10396 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -17,7 +17,7 @@ import (
func TestDB_Inhume(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw := testutil.GenerateObject()
testutil.AddAttribute(raw, "foo", "bar")
@@ -37,7 +37,7 @@ func TestDB_Inhume(t *testing.T) {
func TestInhumeTombOnTomb(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
var (
err error
@@ -107,7 +107,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
func TestInhumeLocked(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
locked := oidtest.Address()
diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go
index d44c51fb2..9cccd7dad 100644
--- a/pkg/local_object_storage/metabase/iterators.go
+++ b/pkg/local_object_storage/metabase/iterators.go
@@ -3,7 +3,6 @@ package meta
import (
"context"
"errors"
- "fmt"
"strconv"
"time"
@@ -12,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
@@ -111,70 +109,6 @@ func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler)
return nil
}
-// IterateCoveredByTombstones iterates over all objects in DB which are covered
-// by tombstone with string address from tss. Locked objects are not included
-// (do not confuse with objects of type LOCK).
-//
-// If h returns ErrInterruptIterator, nil returns immediately.
-// Returns other errors of h directly.
-//
-// Does not modify tss.
-func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- return db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateCoveredByTombstones(tx, tss, h)
- })
-}
-
-func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error {
- bktGraveyard := tx.Bucket(graveyardBucketName)
-
- err := bktGraveyard.ForEach(func(k, v []byte) error {
- var addr oid.Address
- if err := decodeAddressFromKey(&addr, v); err != nil {
- return err
- }
- if _, ok := tss[addr.EncodeToString()]; ok {
- var addr oid.Address
-
- err := decodeAddressFromKey(&addr, k)
- if err != nil {
- return fmt.Errorf("could not parse address of the object under tombstone: %w", err)
- }
-
- if objectLocked(tx, addr.Container(), addr.Object()) {
- return nil
- }
-
- return h(addr)
- }
-
- return nil
- })
-
- if errors.Is(err, ErrInterruptIterator) {
- err = nil
- }
-
- return err
-}
-
func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error {
var cid cid.ID
var oid oid.ID
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 646dc196c..4c9579965 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -18,7 +17,7 @@ import (
func TestDB_IterateExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const epoch = 13
@@ -67,65 +66,3 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt
return object2.AddressOf(obj)
}
-
-func TestDB_IterateCoveredByTombstones(t *testing.T) {
- db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
-
- cnr := cidtest.ID()
- ts := oidtest.Address()
- protected1 := oidtest.Address()
- protected2 := oidtest.Address()
- protectedLocked := oidtest.Address()
- garbage := oidtest.Address()
- ts.SetContainer(cnr)
- protected1.SetContainer(cnr)
- protected2.SetContainer(cnr)
- protectedLocked.SetContainer(cnr)
-
- var prm meta.InhumePrm
- var err error
-
- prm.SetAddresses(protected1, protected2, protectedLocked)
- prm.SetTombstoneAddress(ts)
-
- _, err = db.Inhume(context.Background(), prm)
- require.NoError(t, err)
-
- prm.SetAddresses(garbage)
- prm.SetGCMark()
-
- _, err = db.Inhume(context.Background(), prm)
- require.NoError(t, err)
-
- var handled []oid.Address
-
- tss := map[string]oid.Address{
- ts.EncodeToString(): ts,
- }
-
- err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
- handled = append(handled, addr)
- return nil
- })
- require.NoError(t, err)
-
- require.Len(t, handled, 3)
- require.Contains(t, handled, protected1)
- require.Contains(t, handled, protected2)
- require.Contains(t, handled, protectedLocked)
-
- err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
- require.NoError(t, err)
-
- handled = handled[:0]
-
- err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
- handled = append(handled, addr)
- return nil
- })
- require.NoError(t, err)
-
- require.Len(t, handled, 2)
- require.NotContains(t, handled, protectedLocked)
-}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index b007ef0da..2a0bd7f6a 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -87,7 +87,8 @@ type CountAliveObjectsInContainerPrm struct {
}
// ListWithCursor lists physical objects available in metabase starting from
-// cursor. Includes objects of all types. Does not include inhumed objects.
+// cursor. Includes objects of all types. Does not include inhumed or expired
+// objects.
// Use cursor value from response for consecutive requests.
//
// Returns ErrEndOfListing if there are no more objects to return or count
@@ -138,11 +139,12 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
var containerID cid.ID
var offset []byte
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
+ bc := newBucketCache()
rawAddr := make([]byte, cidSize, addressKeySize)
+ currEpoch := db.epochState.CurrentEpoch()
+
loop:
for ; name != nil; name, _ = c.Next() {
cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name)
@@ -166,8 +168,8 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
- result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
- result, count, cursor, threshold)
+ result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
+ result, count, cursor, threshold, currEpoch)
if err != nil {
return nil, nil, err
}
@@ -185,8 +187,7 @@ loop:
if offset != nil {
// new slice is much faster but less memory efficient
// we need to copy, because offset exists during bbolt tx
- cursor.inBucketOffset = make([]byte, len(offset))
- copy(cursor.inBucketOffset, offset)
+ cursor.inBucketOffset = bytes.Clone(offset)
}
if len(result) == 0 {
@@ -195,29 +196,29 @@ loop:
// new slice is much faster but less memory efficient
// we need to copy, because bucketName exists during bbolt tx
- cursor.bucketName = make([]byte, len(bucketName))
- copy(cursor.bucketName, bucketName)
+ cursor.bucketName = bytes.Clone(bucketName)
return result, cursor, nil
}
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
-func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
+func selectNFromBucket(
+ bc *bucketCache,
+ bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
- graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
to []objectcore.Info, // listing result
limit int, // stop listing at `limit` items in result
cursor *Cursor, // start from cursor object
threshold bool, // ignore cursor and start immediately
+ currEpoch uint64,
) ([]objectcore.Info, []byte, *Cursor, error) {
if cursor == nil {
cursor = new(Cursor)
}
- count := len(to)
c := bkt.Cursor()
k, v := c.First()
@@ -229,7 +230,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
}
for ; k != nil; k, v = c.Next() {
- if count >= limit {
+ if len(to) >= limit {
break
}
@@ -239,17 +240,25 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
}
offset = k
+ graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
+ garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return nil, nil, nil, err
+ }
+
+ expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
+ if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
+ continue
+ }
+
var isLinkingObj bool
var ecInfo *objectcore.ECInfo
if objType == objectSDK.TypeRegular {
- var o objectSDK.Object
- if err := o.Unmarshal(v); err != nil {
- return nil, nil, nil, err
- }
isLinkingObj = isLinkObject(&o)
ecHeader := o.ECHeader()
if ecHeader != nil {
@@ -265,7 +274,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
a.SetContainer(cnt)
a.SetObject(obj)
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
- count++
}
return to, offset, cursor, nil
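
Listing therefore skips an object only when it carries an expiration epoch below the current one and is not protected by a lock; everything else still costs one unmarshal per key. A hedged sketch of the consumer-side cursor loop — SetCount, AddressList, and Cursor are assumed from the package's Prm/Res conventions and the benchmark below, not all of them are visible in these hunks:

package listsketch

import (
	"context"
	"errors"

	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// listAll drains the metabase page by page until ErrEndOfListing.
func listAll(ctx context.Context, db *meta.DB, pageSize uint32) ([]objectcore.Info, error) {
	var all []objectcore.Info
	var prm meta.ListPrm
	prm.SetCount(pageSize)
	for {
		res, err := db.ListWithCursor(ctx, prm)
		if err != nil {
			if errors.Is(err, meta.ErrEndOfListing) {
				return all, nil
			}
			return nil, err
		}
		all = append(all, res.AddressList()...)
		prm.SetCursor(res.Cursor())
	}
}
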
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index 203802ec0..02985991c 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -3,14 +3,17 @@ package meta_test
import (
"context"
"errors"
+ "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
@@ -18,6 +21,8 @@ import (
func BenchmarkListWithCursor(b *testing.B) {
db := listWithCursorPrepareDB(b)
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
+
b.Run("1 item", func(b *testing.B) {
benchmarkListWithCursor(b, db, 1)
})
@@ -33,7 +38,6 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
NoSync: true,
})) // faster single-thread generation
- defer func() { require.NoError(b, db.Close()) }()
obj := testutil.GenerateObject()
for i := range 100_000 { // should be a multiple of all batch sizes
@@ -55,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
- if err != meta.ErrEndOfListing {
+ if !errors.Is(err, meta.ErrEndOfListing) {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)
@@ -70,14 +74,16 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
func TestLisObjectsWithCursor(t *testing.T) {
t.Parallel()
- db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
-
const (
+ currEpoch = 100
+ expEpoch = currEpoch - 1
containers = 5
- total = containers * 4 // regular + ts + child + lock
+ total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
)
+ db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
expected := make([]object.Info, 0, total)
// fill metabase with objects
@@ -126,6 +132,26 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, child)
require.NoError(t, err)
expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
+
+ // add expired object (do not include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+
+ // add non-expired object (include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
+
+ // add locked expired object (include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ objID := oidtest.ID()
+ obj.SetID(objID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+ require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
}
t.Run("success with various count", func(t *testing.T) {
@@ -163,7 +189,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const total = 5
@@ -225,7 +251,7 @@ func TestIterateOver(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const total uint64 = 5
for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index 6b78ef392..f4cb9e53b 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -4,8 +4,10 @@ import (
"bytes"
"context"
"fmt"
+ "slices"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -62,9 +64,7 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.
return ErrReadOnlyMode
}
- if len(locked) == 0 {
- panic("empty locked list")
- }
+ assert.False(len(locked) == 0, "empty locked list")
err := db.lockInternal(locked, cnr, locker)
success = err == nil
@@ -162,7 +162,11 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
// checks if specified object is locked in the specified container.
func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- bucketLocked := tx.Bucket(bucketNameLocked)
+ return objectLockedWithCache(nil, tx, idCnr, idObj)
+}
+
+func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
+ bucketLocked := getLockedBucket(bc, tx)
if bucketLocked != nil {
key := make([]byte, cidSize)
idCnr.Encode(key)
@@ -176,7 +180,7 @@ func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
}
// return `LOCK` id's if specified object is locked in the specified container.
-func getLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
+func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
var lockers []oid.ID
bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
@@ -250,7 +254,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
unlockedObjects = append(unlockedObjects, addr)
} else {
// exclude locker
- keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
+ keyLockers = slices.Delete(keyLockers, i, i+1)
v, err = encodeList(keyLockers)
if err != nil {
@@ -351,20 +355,20 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e
return res, err
}
-// GetLocked return `LOCK` id's if provided object is locked by any `LOCK`. Not found
+// GetLocks returns `LOCK` IDs if the provided object is locked by any `LOCK`. Not found
// object is considered as non-locked.
//
// Returns only non-logical errors related to underlying database.
-func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
+func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("GetLocked", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocked",
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
@@ -377,7 +381,7 @@ func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, er
return res, ErrDegradedMode
}
err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res, err = getLocked(tx, addr.Container(), addr.Object())
+ res, err = getLocks(tx, addr.Container(), addr.Object())
return nil
}))
success = err == nil
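
With the GetLocked → GetLocks rename, callers change mechanically; a minimal sketch:

package locksketch

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// isHeldByLock reports whether any LOCK object currently covers addr;
// a missing object is treated as non-locked.
func isHeldByLock(ctx context.Context, db *meta.DB, addr oid.Address) (bool, error) {
	lockIDs, err := db.GetLocks(ctx, addr) // renamed from GetLocked
	if err != nil {
		return false, err
	}
	return len(lockIDs) > 0, nil
}
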
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 9601cb2be..341ff9ad1 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -21,7 +21,7 @@ func TestDB_Lock(t *testing.T) {
cnr := cidtest.ID()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("empty locked list", func(t *testing.T) {
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
@@ -187,7 +187,7 @@ func TestDB_Lock_Expired(t *testing.T) {
es := &epochState{e: 123}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// put an object
addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)
@@ -209,7 +209,7 @@ func TestDB_IsLocked(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// existing and locked objs
diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go
index 2032ed6b2..7edb96384 100644
--- a/pkg/local_object_storage/metabase/mode.go
+++ b/pkg/local_object_storage/metabase/mode.go
@@ -1,6 +1,7 @@
package meta
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -8,7 +9,7 @@ import (
// SetMode sets the metabase mode of operation.
// If the mode assumes no operation metabase, the database is closed.
-func (db *DB) SetMode(m mode.Mode) error {
+func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
@@ -17,20 +18,20 @@ func (db *DB) SetMode(m mode.Mode) error {
}
if !db.mode.NoMetabase() {
- if err := db.Close(); err != nil {
- return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ if err := db.Close(ctx); err != nil {
+ return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
if m.NoMetabase() {
db.boltDB = nil
} else {
- err := db.openDB(m)
+ err := db.openDB(ctx, m)
if err == nil && !m.ReadOnly() {
- err = db.Init()
+ err = db.Init(ctx)
}
if err != nil {
- return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
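
SetMode can close, reopen, and re-init the database, so it now takes the caller's context; a sketch of a mode switch (wrapper name illustrative):

package modesketch

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)

// degrade switches the metabase off; the context flows into the
// close/open/init cycle SetMode may perform.
func degrade(ctx context.Context, db *meta.DB) error {
	if err := db.SetMode(ctx, mode.DegradedReadOnly); err != nil {
		return fmt.Errorf("switch metabase mode: %w", err)
	}
	return nil
}
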
diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go
index 1b9f60055..28b42283f 100644
--- a/pkg/local_object_storage/metabase/mode_test.go
+++ b/pkg/local_object_storage/metabase/mode_test.go
@@ -25,13 +25,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init())
+ require.NoError(t, bdb.Init(context.Background()))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close())
+ require.NoError(t, bdb.Close(context.Background()))
require.NoError(t, bdb.Open(context.Background(), mode.Degraded))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init())
+ require.NoError(t, bdb.Init(context.Background()))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close())
+ require.NoError(t, bdb.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 09c5e04ad..5e1bbfe9e 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -100,7 +100,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
})
if err == nil {
success = true
- storagelog.Write(db.log,
+ storagelog.Write(ctx, db.log,
storagelog.AddressField(objectCore.AddressOf(prm.obj)),
storagelog.OpField("metabase PUT"))
}
@@ -120,9 +120,15 @@ func (db *DB) put(tx *bbolt.Tx,
return PutRes{}, errors.New("missing container in object")
}
+ var ecParentAddress oid.Address
+ if ecHeader := obj.ECHeader(); ecHeader != nil {
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ecHeader.Parent())
+ }
+
isParent := si != nil
- exists, _, err := db.exists(tx, objectCore.AddressOf(obj), oid.Address{}, currEpoch)
+ exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch)
var splitInfoError *objectSDK.SplitInfoError
if errors.As(err, &splitInfoError) {
@@ -173,18 +179,18 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
- return fmt.Errorf("can't put unique indexes: %w", err)
+ return fmt.Errorf("put unique indexes: %w", err)
}
err = updateListIndexes(tx, obj, putListIndexItem)
if err != nil {
- return fmt.Errorf("can't put list indexes: %w", err)
+ return fmt.Errorf("put list indexes: %w", err)
}
if indexAttributes {
err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
if err != nil {
- return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
+ return fmt.Errorf("put fake bucket tree indexes: %w", err)
}
}
@@ -243,7 +249,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad
}
rawObject, err := obj.CutPayload().Marshal()
if err != nil {
- return fmt.Errorf("can't marshal object header: %w", err)
+ return fmt.Errorf("marshal object header: %w", err)
}
return putUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
@@ -468,7 +474,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck
func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
data, err := update(bkt.Get(item.key), item.val)
@@ -485,12 +491,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
if err != nil {
- return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
+ return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
}
return fkbtRoot.Put(item.val, zeroValue)
@@ -499,19 +505,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
lst, err := decodeList(bkt.Get(item.key))
if err != nil {
- return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("decode leaf list %v: %w", item.key, err)
}
lst = append(lst, item.val)
encodedLst, err := encodeList(lst)
if err != nil {
- return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("encode leaf list %v: %w", item.key, err)
}
return bkt.Put(item.key, encodedLst)
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index 914f5ef06..f37ed4cf2 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -46,7 +46,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(runtime.NumCPU()))
- defer func() { require.NoError(b, db.Close()) }()
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
b.SetParallelism(1)
@@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(1))
- defer func() { require.NoError(b, db.Close()) }()
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
var index atomic.Int64
index.Store(-1)
objs := prepareObjects(b.N)
@@ -84,7 +84,7 @@ func BenchmarkPut(b *testing.B) {
func TestDB_PutBlobovniczaUpdate(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw1 := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
index 993079dce..5f0956f0b 100644
--- a/pkg/local_object_storage/metabase/reset_test.go
+++ b/pkg/local_object_storage/metabase/reset_test.go
@@ -30,14 +30,14 @@ func TestResetDropsContainerBuckets(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
for idx := range 100 {
var putPrm PutPrm
putPrm.SetObject(testutil.GenerateObject())
- putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
+ putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
_, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
}
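
The fmt.Appendf change above is a small modernization (the function has existed since Go 1.19): it formats straight into a byte slice instead of building a string and converting it:

package putsketch

import "fmt"

// storageID shows the before/after of the test change above.
func storageID(idx int) []byte {
	// old: []byte(fmt.Sprintf("0/%d", idx)) — format to a string, then copy
	return fmt.Appendf(nil, "0/%d", idx) // new: format directly into a []byte
}
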
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index f802036be..60da50671 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -131,6 +131,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
res := make([]oid.Address, 0, len(mAddr))
+ bc := newBucketCache()
for a, ind := range mAddr {
if ind != expLen {
continue // ignore objects with unmatched fast filters
@@ -145,7 +146,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(id)
- st, err := objectStatus(tx, addr, currEpoch)
+ st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -153,7 +154,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
continue // ignore removed objects
}
- addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
+ addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
if !match {
continue // ignore objects with unmatched slow filters
}
@@ -451,13 +452,13 @@ func (db *DB) selectObjectID(
}
// matchSlowFilters return true if object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
result := addr
if len(f) == 0 {
return result, true
}
- obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch)
+ obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
if err != nil {
return result, false
}
@@ -515,9 +516,9 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
return result, true
}
-func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
+func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
buf := make([]byte, addressKeySize)
- obj, err := db.get(tx, addr, buf, true, false, currEpoch)
+ obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
if err != nil {
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
@@ -527,7 +528,7 @@ func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch
continue
}
addr.SetObject(objID)
- obj, err = db.get(tx, addr, buf, true, false, currEpoch)
+ obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
if err == nil {
return obj, true, nil
}
@@ -565,7 +566,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
case v2object.FilterHeaderContainerID: // support deprecated field
err := res.cnr.DecodeString(filters[i].Value())
if err != nil {
- return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
+ return filterGroup{}, fmt.Errorf("parse container id: %w", err)
}
res.withCnrFilter = true
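
Select builds one bucketCache per call, so status checks and slow filters stop re-resolving the graveyard, garbage, and locked buckets for every candidate address. A hedged usage sketch grounded in the tests and benchmark below (AddFilter and AddressList are assumed SDK/metabase accessors, not shown in these hunks):

package selectsketch

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// selectByAttribute runs one filtered Select; the bucket cache is created
// once inside selectObjects and reused across all candidates.
func selectByAttribute(ctx context.Context, db *meta.DB, cnr cidSDK.ID, key, value string) ([]oid.Address, error) {
	var fs objectSDK.SearchFilters
	fs.AddFilter(key, value, objectSDK.MatchStringEqual)

	var prm meta.SelectPrm
	prm.SetContainerID(cnr)
	prm.SetFilters(fs)

	res, err := db.Select(ctx, prm)
	if err != nil {
		return nil, err
	}
	return res.AddressList(), nil
}
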
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 6f48607be..ce2156d2e 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -38,7 +38,7 @@ func testSelectUserAttributes(t *testing.T, index bool) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -200,7 +200,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -354,7 +354,7 @@ func TestDB_SelectInhume(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -385,7 +385,7 @@ func TestDB_SelectPayloadHash(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -456,7 +456,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -564,7 +564,7 @@ func TestDB_SelectObjectID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -680,7 +680,7 @@ func TestDB_SelectOwnerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -786,7 +786,7 @@ func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
ecChunk1 := oidtest.ID()
@@ -865,7 +865,7 @@ func TestDB_RawHead_SplitInfo(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -906,7 +906,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
t.Run("first last, then linking", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, lastPart, nil))
require.NoError(t, metaPut(db, linking, nil))
@@ -930,7 +930,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("first linking, then last", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, linking, nil))
require.NoError(t, metaPut(db, lastPart, nil))
@@ -954,7 +954,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("only last part", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, lastPart, nil))
@@ -984,7 +984,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1052,7 +1052,7 @@ func TestDB_SelectSplitID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1109,7 +1109,7 @@ func TestDB_SelectContainerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1157,7 +1157,7 @@ func TestDB_SelectContainerID(t *testing.T) {
func BenchmarkSelect(b *testing.B) {
const objCount = 1000
db := newDB(b)
- defer func() { require.NoError(b, db.Close()) }()
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
cid := cidtest.ID()
@@ -1199,7 +1199,7 @@ func TestExpiredObjects(t *testing.T) {
t.Parallel()
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
cidExp, _ := exp.ContainerID()
@@ -1216,6 +1216,8 @@ func TestExpiredObjects(t *testing.T) {
}
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) {
+ b.ReportAllocs()
+
var prm meta.SelectPrm
prm.SetContainerID(cid)
prm.SetFilters(fs)
diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go
index 88446494e..72618b1a0 100644
--- a/pkg/local_object_storage/metabase/shard_id.go
+++ b/pkg/local_object_storage/metabase/shard_id.go
@@ -2,6 +2,7 @@ package meta
import (
"bytes"
+ "context"
"errors"
"fmt"
"os"
@@ -21,7 +22,7 @@ var (
// If id is missing, returns nil, nil.
//
// GetShardID does not report any metrics.
-func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) {
+func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -30,14 +31,14 @@ func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) {
return nil, nil
}
- if err := db.openDB(mode); err != nil {
- return nil, fmt.Errorf("failed to open metabase: %w", err)
+ if err := db.openDB(ctx, mode); err != nil {
+ return nil, fmt.Errorf("open metabase: %w", err)
}
id, err := db.readShardID()
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
}
return id, metaerr.Wrap(err)
@@ -59,7 +60,7 @@ func (db *DB) readShardID() ([]byte, error) {
// SetShardID sets metabase operation mode
// and writes shard id to db.
-func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
+func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -68,8 +69,8 @@ func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
return ErrReadOnlyMode
}
- if err := db.openDB(mode); err != nil {
- return fmt.Errorf("failed to open metabase: %w", err)
+ if err := db.openDB(ctx, mode); err != nil {
+ return fmt.Errorf("open metabase: %w", err)
}
err := db.writeShardID(id)
@@ -78,7 +79,7 @@ func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
}
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
}
return metaerr.Wrap(err)
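GetShardID and SetShardID now share the same open-operate-close shape, in which a failure to close must not mask the primary error. A reduced sketch of that pattern with `errors.Join`, using a plain file instead of the metabase (illustrative, not repository code):

```go
package storage

import (
	"errors"
	"fmt"
	"os"
)

// readFirstByte opens the file, reads one byte and always closes it,
// joining a close failure with the primary error instead of hiding it.
func readFirstByte(path string) (byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, fmt.Errorf("open file: %w", err)
	}

	buf := make([]byte, 1)
	_, err = f.Read(buf)

	if cErr := f.Close(); cErr != nil {
		err = errors.Join(err, fmt.Errorf("close file: %w", cErr))
	}
	return buf[0], err
}
```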
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
index 6d620b41a..8f2376503 100644
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ b/pkg/local_object_storage/metabase/storage_id.go
@@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns storage descriptor for objects from the blobstor.
// It is stored together with the object and makes get/delete operations faster.
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
+func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) {
var (
startedAt = time.Now()
success = false
@@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
+ var res StorageIDRes
if db.mode.NoMetabase() {
return res, ErrDegradedMode
}
- err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.id, err = db.storageID(tx, prm.addr)
-
- return err
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ res.id = db.storageID(tx, prm.addr)
+ return nil
})
success = err == nil
return res, metaerr.Wrap(err)
}
-func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) {
+func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte {
key := make([]byte, bucketKeySize)
smallBucket := tx.Bucket(smallBucketName(addr.Container(), key))
if smallBucket == nil {
- return nil, nil
+ return nil
}
storageID := smallBucket.Get(objectKey(addr.Object(), key))
if storageID == nil {
- return nil, nil
+ return nil
}
- return bytes.Clone(storageID), nil
+ return bytes.Clone(storageID)
}
// UpdateStorageIDPrm groups the parameters of UpdateStorageID operation.
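The `bytes.Clone` kept at the end of `storageID` is load-bearing: slices returned by bbolt's `Get` alias the memory-mapped page and are only valid while the transaction is open. A hedged sketch of the rule (the bucket name is invented):

```go
package storage

import (
	"bytes"

	"go.etcd.io/bbolt"
)

// loadID copies the value out of the read transaction; retaining the raw
// slice after View returns would read freed (remapped) memory.
func loadID(db *bbolt.DB, key []byte) ([]byte, error) {
	var id []byte
	err := db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte("ids")) // illustrative bucket name
		if b == nil {
			return nil // treat a missing bucket as "no ID", not an error
		}
		id = bytes.Clone(b.Get(key)) // bytes.Clone(nil) stays nil
		return nil
	})
	return id, err
}
```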
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index a86e42bd2..fef680159 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -15,7 +15,7 @@ func TestDB_StorageID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw1 := testutil.GenerateObject()
raw2 := testutil.GenerateObject()
@@ -79,7 +79,7 @@ func TestPutWritecacheDataRace(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
putStorageID := []byte{1, 2, 3}
wcStorageID := []byte{1, 2, 3, 4, 5}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index bcf72f440..4948f3424 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -95,7 +95,7 @@ func compactDB(db *bbolt.DB) error {
NoSync: true,
})
if err != nil {
- return fmt.Errorf("can't open new metabase to compact: %w", err)
+ return fmt.Errorf("open new metabase to compact: %w", err)
}
if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
@@ -292,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
}
expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
if err != nil {
- return fmt.Errorf("could not parse expiration epoch: %w", err)
+ return fmt.Errorf("parse expiration epoch: %w", err)
}
expirationEpochBucket := b.Bucket(attrValue)
attrKeyValueC := expirationEpochBucket.Cursor()
@@ -360,7 +360,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
return nil
}
last = keys[len(keys)-1]
- cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys)
+ cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys)
if err != nil {
log("deleting user attribute buckets completed with an error:", err)
return err
@@ -376,8 +376,8 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
}
}
-func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
- keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
+func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
+ keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs)
if err != nil {
return 0, fmt.Errorf("select non indexed user attributes: %w", err)
}
@@ -394,12 +394,12 @@ func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider,
return uint64(len(keysToDrop)), nil
}
-func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
+func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
var keysToDrop [][]byte
for _, key := range keys {
attr, ok := attributeFromAttributeBucket(key)
if !ok {
- return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+ return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
}
if !IsAtrributeIndexed(attr) {
keysToDrop = append(keysToDrop, key)
@@ -407,9 +407,9 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
}
contID, ok := cidFromAttributeBucket(key)
if !ok {
- return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+ return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
}
- info, err := cs.Info(contID)
+ info, err := cs.Info(ctx, contID)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index aeb14aeb6..c90de4dd6 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -34,18 +34,18 @@ func TestUpgradeV2ToV3(t *testing.T) {
}()
db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
- require.NoError(t, db.Close())
+ require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion)
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
fmt.Println()
}
type testContainerInfoProvider struct{}
-func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) {
+func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) {
return container.Info{}, nil
}
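The test encodes the intended operator flow: `Init` refuses to run on an old schema with `ErrOutdatedVersion`, the metabase is closed, `Upgrade` migrates the file in place, and only then do `Open`/`Init` succeed. A condensed sketch of that sequence, written as if inside the metabase package and using only identifiers exercised by the test above:

```go
// openWithUpgrade retries initialization once after an in-place upgrade
// when the on-disk schema is older than the binary expects.
func openWithUpgrade(ctx context.Context, db *DB, path string, cs container.InfoProvider) error {
	if err := db.Open(ctx, mode.ReadWrite); err != nil {
		return err
	}
	err := db.Init(ctx)
	if !errors.Is(err, ErrOutdatedVersion) {
		return err // nil on success, or a failure Upgrade cannot fix
	}
	if err := db.Close(ctx); err != nil {
		return err
	}
	if err := Upgrade(ctx, path, true, cs, func(...any) {}); err != nil { // no-op logger
		return err
	}
	if err := db.Open(ctx, mode.ReadWrite); err != nil {
		return err
	}
	return db.Init(ctx)
}
```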
@@ -87,7 +87,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
db.boltDB.AllocSize = allocSize
db.boltDB.NoSync = true
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
containers := make([]cid.ID, containersCount)
for i := range containers {
containers[i] = cidtest.ID()
@@ -113,7 +113,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects generated")
+ db.log.Info(ctx, "simple objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// complex objects
@@ -137,7 +137,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("complex objects generated")
+ db.log.Info(ctx, "complex objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects deleted by gc marks
@@ -159,7 +159,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects deleted by gc marks generated")
+ db.log.Info(ctx, "simple objects deleted by gc marks generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(10000)
// simple objects deleted by tombstones
@@ -189,7 +189,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects deleted by tombstones generated")
+ db.log.Info(ctx, "simple objects deleted by tombstones generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects locked by locks
@@ -216,7 +216,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects locked by locks generated")
+ db.log.Info(ctx, "simple objects locked by locks generated")
require.NoError(t, db.boltDB.Sync())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 0a2f91a47..4ad83332b 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -231,11 +232,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
epoch := binary.BigEndian.Uint64(key)
var cnr cid.ID
if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
}
var obj oid.ID
if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
}
return epoch, cnr, obj, nil
}
@@ -278,9 +279,7 @@ func objectKey(obj oid.ID, key []byte) []byte {
//
// firstIrregularObjectType(tx, cnr, obj) usage allows getting object type.
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type {
- if len(objs) == 0 {
- panic("empty object list in firstIrregularObjectType")
- }
+ assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType")
var keys [2][1 + cidSize]byte
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index 048bb9af6..fbc0f1ad9 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
- return fmt.Errorf("can't create auxiliary bucket: %w", err)
+ return fmt.Errorf("create auxiliary bucket: %w", err)
}
return b.Put(versionKey, data)
}
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index 75229a1b4..b373fb32e 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -45,25 +45,25 @@ func TestVersion(t *testing.T) {
t.Run("simple", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
t.Run("reopen", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
t.Run("old data", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.SetShardID([]byte{1, 2, 3, 4}, mode.ReadWrite))
+ require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
t.Run("invalid version", func(t *testing.T) {
db := newDB(t)
@@ -71,37 +71,37 @@ func TestVersion(t *testing.T) {
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return updateVersion(tx, version+1)
}))
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.Error(t, db.Init())
- require.NoError(t, db.Close())
+ require.Error(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
t.Run("reset", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Reset())
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
t.Run("incompleted upgrade", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
}))
- require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade)
- require.NoError(t, db.Close())
+ require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
}))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
})
}
diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go
index 22b951a41..3156751f2 100644
--- a/pkg/local_object_storage/pilorama/bench_test.go
+++ b/pkg/local_object_storage/pilorama/bench_test.go
@@ -28,8 +28,8 @@ func BenchmarkCreate(b *testing.B) {
WithPath(filepath.Join(tmpDir, "test.db")),
WithMaxBatchSize(runtime.GOMAXPROCS(0)))
require.NoError(b, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(b, f.Init())
- defer func() { require.NoError(b, f.Close()) }()
+ require.NoError(b, f.Init(context.Background()))
+ defer func() { require.NoError(b, f.Close(context.Background())) }()
b.Cleanup(func() {
require.NoError(b, os.RemoveAll(tmpDir))
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 7bce1f340..897b37ea0 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -91,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage {
return &b
}
-func (t *boltForest) SetMode(m mode.Mode) error {
+func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
t.modeMtx.Lock()
defer t.modeMtx.Unlock()
@@ -99,14 +99,14 @@ func (t *boltForest) SetMode(m mode.Mode) error {
return nil
}
- err := t.Close()
+ err := t.Close(ctx)
if err == nil && !m.NoMetabase() {
if err = t.openBolt(m); err == nil {
- err = t.Init()
+ err = t.Init(ctx)
}
}
if err != nil {
- return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+ return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
}
t.mode = m
@@ -128,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
readOnly := m.ReadOnly()
err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
+ return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
}
opts := *bbolt.DefaultOptions
@@ -139,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
t.db, err = bbolt.Open(t.path, t.perm, &opts)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
+ return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
}
t.db.MaxBatchSize = t.maxBatchSize
@@ -148,7 +148,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
return nil
}
-func (t *boltForest) Init() error {
+func (t *boltForest) Init(context.Context) error {
if t.mode.NoMetabase() || t.db.IsReadOnly() {
return nil
}
@@ -162,7 +162,7 @@ func (t *boltForest) Init() error {
})
}
-func (t *boltForest) Close() error {
+func (t *boltForest) Close(context.Context) error {
var err error
if t.db != nil {
err = t.db.Close()
@@ -419,10 +419,7 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri
return err
}
- i, node, err := t.getPathPrefix(bTree, attr, path)
- if err != nil {
- return err
- }
+ i, node := t.getPathPrefix(bTree, attr, path)
ts := t.getLatestTimestamp(bLog, d.Position, d.Size)
lm = make([]Move, len(path)-i+1)
@@ -558,6 +555,80 @@ func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string
return metaerr.Wrap(err)
}
+func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ m, err := t.filterSeen(cnr, treeID, m)
+ if err != nil {
+ return err
+ }
+ if len(m) == 0 {
+ success = true
+ return nil
+ }
+
+ ch := make(chan error)
+ b := &batch{
+ forest: t,
+ cid: cnr,
+ treeID: treeID,
+ results: []chan<- error{ch},
+ operations: m,
+ }
+ go func() {
+ b.run()
+ }()
+ err = <-ch
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) {
+ t.modeMtx.RLock()
+ defer t.modeMtx.RUnlock()
+
+ if t.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ ops := make([]*Move, 0, len(m))
+ err := t.db.View(func(tx *bbolt.Tx) error {
+ treeRoot := tx.Bucket(bucketName(cnr, treeID))
+ if treeRoot == nil {
+ ops = m
+ return nil
+ }
+ b := treeRoot.Bucket(logBucket)
+ for _, op := range m {
+ var logKey [8]byte
+ binary.BigEndian.PutUint64(logKey[:], op.Time)
+ seen := b.Get(logKey[:]) != nil
+ if !seen {
+ ops = append(ops, op)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, metaerr.Wrap(err)
+ }
+ return ops, nil
+}
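Taken together, `TreeApplyBatch` first drops operations whose timestamps already appear in the tree's log (`filterSeen`), then schedules the remainder as a single batch. A caller-side sketch of how a sync loop might feed it; the chunk size and function name are assumptions, not code from this repository:

```go
package treesync

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// applyInChunks feeds a replicated log to the forest in fixed-size batches
// so each bbolt write transaction stays bounded; TreeApplyBatch itself
// filters out operations that were already applied.
func applyInChunks(ctx context.Context, f pilorama.Forest, cnr cidSDK.ID, treeID string, ops []*pilorama.Move) error {
	const chunk = 1000 // assumed batch size
	for len(ops) > 0 {
		n := min(chunk, len(ops))
		if err := f.TreeApplyBatch(ctx, cnr, treeID, ops[:n]); err != nil {
			return err
		}
		ops = ops[n:]
	}
	return nil
}
```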
+
// TreeApplyStream should be used with caution: this method locks other write transactions while `source` is not closed.
func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error {
var (
@@ -906,10 +977,7 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st
b := treeRoot.Bucket(dataBucket)
- i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
- if err != nil {
- return err
- }
+ i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
if i < len(path)-1 {
return nil
}
@@ -1009,7 +1077,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol
}
// TreeSortedByFilename implements the Forest interface.
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) {
+func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
var (
startedAt = time.Now()
success = false
@@ -1087,7 +1155,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
}
if len(res) != 0 {
s := string(findAttr(res[len(res)-1].Meta, AttributeFilename))
- last = &s
+ last = NewCursor(s, res[len(res)-1].LastChild())
}
return res, last, metaerr.Wrap(err)
}
@@ -1098,10 +1166,10 @@ func sortByFilename(nodes []NodeInfo) {
})
}
-func sortAndCut(result []NodeInfo, last *string) []NodeInfo {
+func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo {
var lastBytes []byte
if last != nil {
- lastBytes = []byte(*last)
+ lastBytes = []byte(last.GetFilename())
}
sortByFilename(result)
@@ -1166,7 +1234,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f
nodes = nil
length = actualLength + 1
count = 0
- c.Seek(append(prefix, byte(length), byte(length>>8)))
+ c.Seek(binary.LittleEndian.AppendUint16(prefix, length))
c.Prev() // c.Next() will be performed by for loop
}
}
@@ -1286,7 +1354,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
return nil
})
if err != nil {
- return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
+ return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
}
success = true
return ids, nil
@@ -1430,7 +1498,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
var contID cidSDK.ID
if err := contID.Decode(k[:32]); err != nil {
- return fmt.Errorf("failed to decode containerID: %w", err)
+ return fmt.Errorf("decode container ID: %w", err)
}
res.Items = append(res.Items, ContainerIDTreeID{
CID: contID,
@@ -1438,8 +1506,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
})
if len(res.Items) == batchSize {
- res.NextPageToken = make([]byte, len(k))
- copy(res.NextPageToken, k)
+ res.NextPageToken = bytes.Clone(k)
break
}
}
@@ -1452,7 +1519,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
return &res, nil
}
-func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) {
+func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) {
c := bTree.Cursor()
var curNodes []Node
@@ -1475,14 +1542,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin
}
if len(nextNodes) == 0 {
- return i, curNodes, nil
+ return i, curNodes
}
}
- return len(path), nextNodes, nil
+ return len(path), nextNodes
}
-func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) {
+func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) {
c := bTree.Cursor()
var curNode Node
@@ -1502,10 +1569,10 @@ loop:
childKey, value = c.Next()
}
- return i, curNode, nil
+ return i, curNode
}
- return len(path), curNode, nil
+ return len(path), curNode
}
func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
@@ -1515,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
lm.Child = binary.LittleEndian.Uint64(data)
lm.Parent = binary.LittleEndian.Uint64(data[8:])
- return lm.Meta.FromBytes(data[16:])
+ return lm.FromBytes(data[16:])
}
func (t *boltForest) logToBytes(lm *Move) []byte {
w := io.NewBufBinWriter()
- size := 8 + 8 + lm.Meta.Size() + 1
+ size := 8 + 8 + lm.Size() + 1
// if lm.HasOld {
// size += 8 + lm.Old.Meta.Size()
// }
@@ -1528,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
w.Grow(size)
w.WriteU64LE(lm.Child)
w.WriteU64LE(lm.Parent)
- lm.Meta.EncodeBinary(w.BinWriter)
+ lm.EncodeBinary(w.BinWriter)
// w.WriteBool(lm.HasOld)
// if lm.HasOld {
// w.WriteU64LE(lm.Old.Parent)
@@ -1590,7 +1657,7 @@ func internalKeyPrefix(key []byte, k string) []byte {
key = append(key, 'i')
l := len(k)
- key = append(key, byte(l), byte(l>>8))
+ key = binary.LittleEndian.AppendUint16(key, uint16(l))
key = append(key, k...)
return key
}
@@ -1605,14 +1672,10 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte {
key = internalKeyPrefix(key, k)
l := len(v)
- key = append(key, byte(l), byte(l>>8))
+ key = binary.LittleEndian.AppendUint16(key, uint16(l))
key = append(key, v...)
- var raw [8]byte
- binary.LittleEndian.PutUint64(raw[:], parent)
- key = append(key, raw[:]...)
-
- binary.LittleEndian.PutUint64(raw[:], node)
- key = append(key, raw[:]...)
+ key = binary.LittleEndian.AppendUint64(key, parent)
+ key = binary.LittleEndian.AppendUint64(key, node)
return key
}
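Several hunks above replace hand-rolled little-endian byte math (`byte(l), byte(l>>8)` and a temporary `[8]byte`) with `binary.LittleEndian.AppendUint16`/`AppendUint64`. A standalone check that the two forms produce identical bytes:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	l, parent := uint16(300), uint64(42)

	// Hand-rolled encoding, as the old code did it.
	manual := []byte{'i', byte(l), byte(l >> 8)}
	var raw [8]byte
	binary.LittleEndian.PutUint64(raw[:], parent)
	manual = append(manual, raw[:]...)

	// Same bytes via the Append helpers, with no temporaries.
	helpers := binary.LittleEndian.AppendUint16([]byte{'i'}, l)
	helpers = binary.LittleEndian.AppendUint64(helpers, parent)

	fmt.Println(bytes.Equal(manual, helpers)) // true
}
```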
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index bb5c22e51..ebfd0bcc0 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"sort"
"strings"
@@ -84,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
s.operations = append(s.operations, op)
}
- mCopy := make([]KeyValue, len(m))
- copy(mCopy, m)
+ mCopy := slices.Clone(m)
op := s.do(&Move{
Parent: node,
Meta: Meta{
@@ -111,7 +111,16 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o
return s.Apply(op)
}
-func (f *memoryForest) Init() error {
+func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error {
+ for _, op := range ops {
+ if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (f *memoryForest) Init(context.Context) error {
return nil
}
@@ -119,11 +128,11 @@ func (f *memoryForest) Open(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) SetMode(mode.Mode) error {
+func (f *memoryForest) SetMode(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) Close() error {
+func (f *memoryForest) Close(context.Context) error {
return nil
}
func (f *memoryForest) SetParentID(string) {}
@@ -155,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
}
// TreeSortedByFilename implements the Forest interface.
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) {
+func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -168,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
var res []NodeInfo
for _, nodeID := range nodeIDs {
- children := s.tree.getChildren(nodeID)
+ children := s.getChildren(nodeID)
for _, childID := range children {
var found bool
for _, kv := range s.infoMap[childID].Meta.Items {
@@ -195,17 +204,14 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
r := mergeNodeInfos(res)
for i := range r {
- if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start {
- finish := i + count
- if len(res) < finish {
- finish = len(res)
- }
+ if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() {
+ finish := min(len(res), i+count)
last := string(findAttr(r[finish-1].Meta, AttributeFilename))
- return r[i:finish], &last, nil
+ return r[i:finish], NewCursor(last, 0), nil
}
}
last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
- return nil, &last, nil
+ return nil, NewCursor(last, 0), nil
}
// TreeGetChildren implements the Forest interface.
@@ -216,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
return nil, ErrTreeNotFound
}
- children := s.tree.getChildren(nodeID)
+ children := s.getChildren(nodeID)
res := make([]NodeInfo, 0, len(children))
for _, childID := range children {
res = append(res, NodeInfo{
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index fbcc53fb3..844084c55 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -30,7 +30,7 @@ var providers = []struct {
{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
f := NewMemoryForest()
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
return f
}},
{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
@@ -40,7 +40,7 @@ var providers = []struct {
WithMaxBatchSize(1),
}, opts...)...)
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
return f
}},
}
@@ -61,7 +61,7 @@ func TestForest_TreeMove(t *testing.T) {
}
func testForestTreeMove(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -125,7 +125,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
}
func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -247,7 +247,7 @@ func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
}
func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *string, count int) *string {
+ treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -302,7 +302,7 @@ func TestForest_TreeSortedIteration(t *testing.T) {
}
func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *string, count int) *string {
+ treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -361,7 +361,7 @@ func TestForest_TreeSortedFilename(t *testing.T) {
}
func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
const controlAttr = "control_attr"
cid := cidtest.ID()
@@ -453,7 +453,7 @@ func TestForest_TreeDrop(t *testing.T) {
}
func testForestTreeDrop(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
const cidsSize = 3
var cids [cidsSize]cidSDK.ID
@@ -523,7 +523,7 @@ func TestForest_TreeAdd(t *testing.T) {
}
func testForestTreeAdd(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -571,7 +571,7 @@ func TestForest_TreeAddByPath(t *testing.T) {
}
func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -709,7 +709,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
t.Run("add a child, then insert a parent removal", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
@@ -722,7 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
testApply(t, s, 11, 10, meta)
@@ -792,7 +792,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
t.Run("expected", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range logs {
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
@@ -801,7 +801,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
})
s := constructor(t, WithMaxBatchSize(batchSize))
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
for range batchSize {
@@ -842,7 +842,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
}
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
t.Run("empty log, no panic", func(t *testing.T) {
_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
@@ -883,7 +883,7 @@ func TestForest_TreeExists(t *testing.T) {
func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
actual, err := s.TreeExists(context.Background(), cid, treeID)
@@ -942,7 +942,7 @@ func TestApplyTricky1(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1005,7 +1005,7 @@ func TestApplyTricky2(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1115,7 +1115,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close()) }()
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1145,7 +1145,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
wg.Wait()
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close())
+ require.NoError(t, actual.Close(context.Background()))
}
}
@@ -1163,7 +1163,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close()) }()
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1179,7 +1179,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close())
+ require.NoError(t, actual.Close(context.Background()))
}
}
@@ -1197,7 +1197,7 @@ func BenchmarkApplySequential(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close()) }()
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1233,7 +1233,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close()) }()
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1290,7 +1290,7 @@ func TestTreeGetByPath(t *testing.T) {
}
func testTreeGetByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
treeID := "version"
@@ -1369,7 +1369,7 @@ func TestGetTrees(t *testing.T) {
}
func testTreeGetTrees(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
d := CIDDescriptor{Position: 0, Size: 1}
@@ -1415,7 +1415,7 @@ func TestTreeLastSyncHeight(t *testing.T) {
}
func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
- defer func() { require.NoError(t, f.Close()) }()
+ defer func() { require.NoError(t, f.Close(context.Background())) }()
cnr := cidtest.ID()
treeID := "someTree"
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
index 5a00bcf7a..b035be1e1 100644
--- a/pkg/local_object_storage/pilorama/heap.go
+++ b/pkg/local_object_storage/pilorama/heap.go
@@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any {
// fixedHeap maintains a fixed number of the smallest elements, starting from some cursor position.
type fixedHeap struct {
- start *string
+ start *Cursor
sorted bool
count int
h *filenameHeap
}
-func newHeap(start *string, count int) *fixedHeap {
+func newHeap(start *Cursor, count int) *fixedHeap {
h := new(filenameHeap)
heap.Init(h)
@@ -50,8 +50,19 @@ func newHeap(start *string, count int) *fixedHeap {
const amortizationMultiplier = 5
func (h *fixedHeap) push(id MultiNode, filename string) bool {
- if h.start != nil && filename <= *h.start {
- return false
+ if h.start != nil {
+ if filename < h.start.GetFilename() {
+ return false
+ } else if filename == h.start.GetFilename() {
+ // A tree may contain many nodes with the same filename but different versions, so that
+ // len(nodes) > batch_size. Nodes cut off by the previous page must be pushed into the
+ // result on a repeated call with the same filename.
+ pos := slices.Index(id, h.start.GetNode())
+ if pos == -1 || pos+1 >= len(id) {
+ return false
+ }
+ id = id[pos+1:]
+ }
}
*h.h = append(*h.h, heapInfo{id: id, filename: filename})
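The new branch handles pages that end inside a run of nodes sharing one filename: resuming by filename alone would re-send or skip the rest of the run, so the cursor also records the last node returned and the heap cuts the MultiNode just past it. A toy model of that cut (`resumeWithin` is illustrative):

```go
package main

import (
	"fmt"
	"slices"
)

type Node = uint64

// resumeWithin returns the part of ids that comes after lastNode,
// or nil when nothing from this filename run remains to send.
func resumeWithin(ids []Node, lastNode Node) []Node {
	pos := slices.Index(ids, lastNode)
	if pos == -1 || pos+1 >= len(ids) {
		return nil
	}
	return ids[pos+1:]
}

func main() {
	run := []Node{10, 11, 12, 13}      // four versions of one filename
	fmt.Println(resumeWithin(run, 11)) // [12 13]
	fmt.Println(resumeWithin(run, 13)) // []
}
```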
diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go
index ce7b3db1e..28b7faec8 100644
--- a/pkg/local_object_storage/pilorama/inmemory.go
+++ b/pkg/local_object_storage/pilorama/inmemory.go
@@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree {
// undo un-does op and changes s in-place.
func (s *memoryTree) undo(op *move) {
if op.HasOld {
- s.tree.infoMap[op.Child] = op.Old
+ s.infoMap[op.Child] = op.Old
} else {
- delete(s.tree.infoMap, op.Child)
+ delete(s.infoMap, op.Child)
}
}
@@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move {
},
}
- shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
- p, ok := s.tree.infoMap[op.Child]
+ shouldPut := !s.isAncestor(op.Child, op.Parent)
+ p, ok := s.infoMap[op.Child]
if ok {
lm.HasOld = true
lm.Old = p
@@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move {
p.Meta = m
p.Parent = op.Parent
- s.tree.infoMap[op.Child] = p
+ s.infoMap[op.Child] = p
return lm
}
@@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
}
var nodes []Node
- var lastTs Timestamp
+ var lastTS Timestamp
children := t.getChildren(curNode)
for i := range children {
@@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
fileName := string(info.Meta.GetAttr(attr))
if fileName == path[len(path)-1] {
if latest {
- if info.Meta.Time >= lastTs {
+ if info.Meta.Time >= lastTS {
nodes = append(nodes[:0], children[i])
}
} else {
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index 61a3849bf..e1f6cd8e7 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -21,6 +21,8 @@ type Forest interface {
// TreeApply applies replicated operation from another node.
// If background is true, TreeApply will first check whether an operation exists.
TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
+ // TreeApplyBatch applies replicated operations from another node.
+ TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
@@ -35,7 +37,7 @@ type Forest interface {
TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
// TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error)
+ TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
@@ -60,10 +62,10 @@ type Forest interface {
type ForestStorage interface {
// DumpInfo returns information about the pilorama.
DumpInfo() Info
- Init() error
+ Init(context.Context) error
Open(context.Context, mode.Mode) error
- Close() error
- SetMode(m mode.Mode) error
+ Close(context.Context) error
+ SetMode(context.Context, mode.Mode) error
SetParentID(id string)
Forest
@@ -77,6 +79,38 @@ const (
AttributeVersion = "Version"
)
+// Cursor keeps state between function calls for traversing nodes.
+// It stores the attributes associated with a previous call, allowing subsequent operations
+// to resume traversal from this point rather than starting from the beginning.
+type Cursor struct {
+ // Last traversed filename.
+ filename string
+
+ // Last traversed node.
+ node Node
+}
+
+func NewCursor(filename string, node Node) *Cursor {
+ return &Cursor{
+ filename: filename,
+ node: node,
+ }
+}
+
+func (c *Cursor) GetFilename() string {
+ if c == nil {
+ return ""
+ }
+ return c.filename
+}
+
+func (c *Cursor) GetNode() Node {
+ if c == nil {
+ return Node(0)
+ }
+ return c.node
+}
+
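Because both getters are nil-safe, callers can start with a nil `*Cursor` and keep passing back whatever the previous call returned. A hedged usage sketch (`drainTree`, the page size, and the stop condition are illustrative):

```go
package treelist

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// drainTree pages through TreeSortedByFilename until an empty page comes
// back; a nil cursor on the first call means "start from the beginning".
func drainTree(ctx context.Context, f pilorama.Forest, cnr cidSDK.ID, treeID string) ([]pilorama.MultiNodeInfo, error) {
	var (
		all    []pilorama.MultiNodeInfo
		cursor *pilorama.Cursor // nil: no previous page
	)
	for {
		page, next, err := f.TreeSortedByFilename(ctx, cnr, treeID, pilorama.MultiNode{pilorama.RootID}, cursor, 100)
		if err != nil {
			return nil, err
		}
		if len(page) == 0 {
			return all, nil
		}
		all = append(all, page...)
		cursor = next
	}
}
```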
// CIDDescriptor contains container ID and information about the node position
// in the list of container nodes.
type CIDDescriptor struct {
diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go
index 01d3da9f0..0c042aa56 100644
--- a/pkg/local_object_storage/pilorama/mode_test.go
+++ b/pkg/local_object_storage/pilorama/mode_test.go
@@ -19,13 +19,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close())
+ require.NoError(t, f.Close(context.Background()))
require.NoError(t, f.Open(context.Background(), mode.Degraded))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close())
+ require.NoError(t, f.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go
index 106ba6ae9..36d347f10 100644
--- a/pkg/local_object_storage/pilorama/multinode.go
+++ b/pkg/local_object_storage/pilorama/multinode.go
@@ -25,6 +25,10 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool {
return true
}
+func (r *MultiNodeInfo) LastChild() Node {
+ return r.Children[len(r.Children)-1]
+}
+
func (n NodeInfo) ToMultiNode() MultiNodeInfo {
return MultiNodeInfo{
Children: MultiNode{n.ID},
diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go
index 54c2b90a6..eecee1527 100644
--- a/pkg/local_object_storage/pilorama/split_test.go
+++ b/pkg/local_object_storage/pilorama/split_test.go
@@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) {
require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4"))
require.Equal(t, []byte{10}, testGetByPath(t, "value0"))
- testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) {
+ testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) {
res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize)
require.NoError(t, err)
return res, last
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index 364649b50..b4015ae8d 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 {
return r.size
}
-func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
+func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
s.m.RLock()
defer s.m.RUnlock()
@@ -34,9 +34,15 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
return ContainerSizeRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ContainerSizeRes{}, err
+ }
+ defer release()
+
size, err := s.metaBase.ContainerSize(prm.cnr)
if err != nil {
- return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
+ return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
}
return ContainerSizeRes{
@@ -69,9 +75,15 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont
return ContainerCountRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ContainerCountRes{}, err
+ }
+ defer release()
+
counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
if err != nil {
- return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err)
+ return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
}
return ContainerCountRes{
@@ -100,6 +112,12 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.metaBase.DeleteContainerSize(ctx, id)
}
@@ -122,5 +140,11 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.metaBase.DeleteContainerCount(ctx, id)
}
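Each shard method now brackets metabase access with `ReadRequest`/`WriteRequest` and releases via `defer` on every path. Below is a minimal semaphore-backed limiter that satisfies this calling convention; it is a sketch, not the actual qos implementation used by the shard:

```go
package qossketch

import (
	"context"

	"golang.org/x/sync/semaphore"
)

type ReleaseFunc func()

// opsLimiter caps concurrent reads and writes with two weighted semaphores.
type opsLimiter struct {
	read, write *semaphore.Weighted
}

func newOpsLimiter(maxRead, maxWrite int64) *opsLimiter {
	return &opsLimiter{
		read:  semaphore.NewWeighted(maxRead),
		write: semaphore.NewWeighted(maxWrite),
	}
}

func (l *opsLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	if err := l.read.Acquire(ctx, 1); err != nil { // blocks, or fails with ctx.Err()
		return nil, err
	}
	return func() { l.read.Release(1) }, nil
}

func (l *opsLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
	if err := l.write.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { l.write.Release(1) }, nil
}
```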
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 62800dbd0..a607f70f7 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -20,25 +20,25 @@ import (
"golang.org/x/sync/errgroup"
)
-func (s *Shard) handleMetabaseFailure(stage string, err error) error {
- s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
+func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error {
+ s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
- err = s.SetMode(mode.ReadOnly)
+ err = s.SetMode(ctx, mode.ReadOnly)
if err == nil {
return nil
}
- s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
+ s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
- err = s.SetMode(mode.DegradedReadOnly)
+ err = s.SetMode(ctx, mode.DegradedReadOnly)
if err != nil {
- return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly))
+ return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly)
}
return nil
}
@@ -72,10 +72,10 @@ func (s *Shard) Open(ctx context.Context) error {
for j := i + 1; j < len(components); j++ {
if err := components[j].Open(ctx, m); err != nil {
// Other components must be opened, fail.
- return fmt.Errorf("could not open %T: %w", components[j], err)
+ return fmt.Errorf("open %T: %w", components[j], err)
}
}
- err = s.handleMetabaseFailure("open", err)
+ err = s.handleMetabaseFailure(ctx, "open", err)
if err != nil {
return err
}
@@ -83,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error {
break
}
- return fmt.Errorf("could not open %T: %w", component, err)
+ return fmt.Errorf("open %T: %w", component, err)
}
}
return nil
@@ -91,8 +91,8 @@ func (s *Shard) Open(ctx context.Context) error {
type metabaseSynchronizer Shard
-func (x *metabaseSynchronizer) Init() error {
- ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
+func (x *metabaseSynchronizer) Init(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
@@ -101,26 +101,24 @@ func (x *metabaseSynchronizer) Init() error {
// Init initializes all Shard's components.
func (s *Shard) Init(ctx context.Context) error {
m := s.GetMode()
- if err := s.initializeComponents(m); err != nil {
+ if err := s.initializeComponents(ctx, m); err != nil {
return err
}
s.updateMetrics(ctx)
s.gc = &gc{
- gcCfg: &s.gcCfg,
- remover: s.removeGarbage,
- stopChannel: make(chan struct{}),
- eventChan: make(chan Event),
- mEventHandler: map[eventType]*eventHandlers{
- eventNewEpoch: {
- cancelFunc: func() {},
- handlers: []eventHandler{
- s.collectExpiredLocks,
- s.collectExpiredObjects,
- s.collectExpiredTombstones,
- s.collectExpiredMetrics,
- },
+ gcCfg: &s.gcCfg,
+ remover: s.removeGarbage,
+ stopChannel: make(chan struct{}),
+ newEpochChan: make(chan uint64),
+ newEpochHandlers: &newEpochHandlers{
+ cancelFunc: func() {},
+ handlers: []newEpochHandler{
+ s.collectExpiredLocks,
+ s.collectExpiredObjects,
+ s.collectExpiredTombstones,
+ s.collectExpiredMetrics,
},
},
}
@@ -138,9 +136,9 @@ func (s *Shard) Init(ctx context.Context) error {
return nil
}
-func (s *Shard) initializeComponents(m mode.Mode) error {
+func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
type initializer interface {
- Init() error
+ Init(context.Context) error
}
var components []initializer
@@ -170,13 +168,13 @@ func (s *Shard) initializeComponents(m mode.Mode) error {
}
for _, component := range components {
- if err := component.Init(); err != nil {
+ if err := component.Init(ctx); err != nil {
if component == s.metaBase {
if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
}
- err = s.handleMetabaseFailure("init", err)
+ err = s.handleMetabaseFailure(ctx, "init", err)
if err != nil {
return err
}
@@ -184,7 +182,7 @@ func (s *Shard) initializeComponents(m mode.Mode) error {
break
}
- return fmt.Errorf("could not initialize %T: %w", component, err)
+ return fmt.Errorf("initialize %T: %w", component, err)
}
}
return nil
@@ -205,19 +203,19 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
- return fmt.Errorf("could not reset metabase: %w", err)
+ return fmt.Errorf("reset metabase: %w", err)
}
withCount := true
totalObjects, err := s.blobStor.ObjectsCount(ctx)
if err != nil {
- s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
+ s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
withCount = false
}
eg, egCtx := errgroup.WithContext(ctx)
- if s.cfg.refillMetabaseWorkersCount > 0 {
- eg.SetLimit(s.cfg.refillMetabaseWorkersCount)
+ if s.refillMetabaseWorkersCount > 0 {
+ eg.SetLimit(s.refillMetabaseWorkersCount)
}
var completedCount uint64
@@ -254,12 +252,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err = errors.Join(egErr, itErr)
if err != nil {
- return fmt.Errorf("could not put objects to the meta: %w", err)
+ return fmt.Errorf("put objects to the meta: %w", err)
}
err = s.metaBase.SyncCounters()
if err != nil {
- return fmt.Errorf("could not sync object counters: %w", err)
+ return fmt.Errorf("sync object counters: %w", err)
}
success = true
@@ -270,9 +268,9 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- s.log.Warn(logs.ShardCouldNotUnmarshalObject,
+ s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
zap.Stringer("address", addr),
- zap.String("err", err.Error()))
+ zap.Error(err))
return nil
}
@@ -280,12 +278,12 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
var isIndexedContainer bool
if hasIndexedAttribute {
- info, err := s.containerInfo.Info(addr.Container())
+ info, err := s.containerInfo.Info(ctx, addr.Container())
if err != nil {
return err
}
if info.Removed {
- s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
+ s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
return nil
}
isIndexedContainer = info.Indexed
@@ -318,7 +316,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("could not unmarshal lock content: %w", err)
+ return fmt.Errorf("unmarshal lock content: %w", err)
}
locked := make([]oid.ID, lock.NumberOfMembers())
@@ -328,7 +326,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
id, _ := obj.ID()
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
- return fmt.Errorf("could not lock objects: %w", err)
+ return fmt.Errorf("lock objects: %w", err)
}
return nil
}
@@ -337,7 +335,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("could not unmarshal tombstone content: %w", err)
+ return fmt.Errorf("unmarshal tombstone content: %w", err)
}
tombAddr := object.AddressOf(obj)
@@ -358,17 +356,18 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- return fmt.Errorf("could not inhume objects: %w", err)
+ return fmt.Errorf("inhume objects: %w", err)
}
return nil
}
// Close releases all Shard's components.
-func (s *Shard) Close() error {
+func (s *Shard) Close(ctx context.Context) error {
+ unlock := s.lockExclusive()
if s.rb != nil {
- s.rb.Stop(s.log)
+ s.rb.Stop(ctx, s.log)
}
- var components []interface{ Close() error }
+ var components []interface{ Close(context.Context) error }
if s.pilorama != nil {
components = append(components, s.pilorama)
@@ -384,15 +383,23 @@ func (s *Shard) Close() error {
var lastErr error
for _, component := range components {
- if err := component.Close(); err != nil {
+ if err := component.Close(ctx); err != nil {
lastErr = err
- s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
+ s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
+ if s.opsLimiter != nil {
+ s.opsLimiter.Close()
+ }
+
+ unlock()
+
+ // GC waits for handlers and the remover to complete, and handlers may try to acquire the shard's lock.
+ // To prevent a deadlock, GC is therefore stopped outside of the exclusive lock.
// If Init/Open was unsuccessful gc can be nil.
if s.gc != nil {
- s.gc.stop()
+ s.gc.stop(ctx)
}
return lastErr
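
The reordering above is the key part of this hunk: components are closed under the exclusive lock, but GC is stopped only after unlock, because GC handlers may block on that same lock. A minimal sketch of the ordering, with illustrative types rather than the shard's real fields:

```go
package main

import "sync"

type shard struct {
	mu     sync.RWMutex
	stopGC func() // blocks until GC handlers, which may take mu.RLock, return
}

func (s *shard) Close() {
	s.mu.Lock()
	// ... close components while holding the exclusive lock ...
	s.mu.Unlock()

	// Stop GC only after releasing the lock: GC handlers may block on
	// mu.RLock, so waiting for them under mu.Lock would deadlock.
	s.stopGC()
}

func main() {
	s := &shard{stopGC: func() {}}
	s.Close()
}
```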
@@ -414,18 +421,18 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
unlock := s.lockExclusive()
defer unlock()
- s.rb.Stop(s.log)
+ s.rb.Stop(ctx, s.log)
if !s.info.Mode.NoMetabase() {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
}
- ok, err := s.metaBase.Reload(c.metaOpts...)
+ ok, err := s.metaBase.Reload(ctx, c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
- s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
- _ = s.setMode(mode.DegradedReadOnly)
+ s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
+ _ = s.setMode(ctx, mode.DegradedReadOnly)
}
return err
}
@@ -437,15 +444,28 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
// config after the node was updated.
err = s.refillMetabase(ctx)
} else {
- err = s.metaBase.Init()
+ err = s.metaBase.Init(ctx)
}
if err != nil {
- s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
- _ = s.setMode(mode.DegradedReadOnly)
+ s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
+ _ = s.setMode(ctx, mode.DegradedReadOnly)
return err
}
}
- return s.setMode(c.info.Mode)
+ if err := s.setMode(ctx, c.info.Mode); err != nil {
+ return err
+ }
+ s.reloadOpsLimiter(&c)
+
+ return nil
+}
+
+func (s *Shard) reloadOpsLimiter(c *cfg) {
+ if c.configOpsLimiter != nil {
+ old := s.opsLimiter.ptr.Swap(&qosLimiterHolder{Limiter: c.configOpsLimiter})
+ old.Close()
+ s.opsLimiter.SetParentID(s.info.ID.String())
+ }
}
func (s *Shard) lockExclusive() func() {
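
reloadOpsLimiter swaps the limiter behind an atomic pointer so readers never observe a half-configured limiter. A rough sketch of the swap, assuming sync/atomic's typed Pointer and a hypothetical limiter interface (the real qos holder may differ):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type limiter interface{ Close() }

type holder struct{ limiter }

type swappable struct {
	ptr atomic.Pointer[holder]
}

// reload publishes the new limiter and closes the old one; new requests
// see the updated configuration while the swap itself stays lock-free.
func (s *swappable) reload(l limiter) {
	old := s.ptr.Swap(&holder{limiter: l})
	if old != nil {
		old.Close()
	}
}

type noop struct{}

func (noop) Close() {}

func main() {
	var s swappable
	s.reload(noop{})
	s.reload(noop{})
	fmt.Println("swapped")
}
```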
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index b8f1d4417..6d2cd7137 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -86,7 +86,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadWrite, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
// Metabase can be opened in read-only => start in ReadOnly mode.
allowedMode.Store(int64(os.O_RDONLY))
@@ -95,9 +95,9 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.Error(t, sh.SetMode(mode.ReadWrite))
+ require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
// Metabase is corrupted => start in DegradedReadOnly mode.
allowedMode.Store(math.MaxInt64)
@@ -106,7 +106,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.DegradedReadOnly, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
}
func TestRefillMetabaseCorrupted(t *testing.T) {
@@ -146,7 +146,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
putPrm.SetObject(obj)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
addr := object.AddressOf(obj)
// This is copied from `fstree.treePath()` to avoid exporting function just for tests.
@@ -170,7 +170,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
getPrm.SetAddress(addr)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
}
func TestRefillMetabase(t *testing.T) {
@@ -358,7 +358,7 @@ func TestRefillMetabase(t *testing.T) {
phyBefore := c.Phy
logicalBefore := c.Logic
- err = sh.Close()
+ err = sh.Close(context.Background())
require.NoError(t, err)
sh = New(
@@ -379,7 +379,7 @@ func TestRefillMetabase(t *testing.T) {
// initialize Blobstor
require.NoError(t, sh.Init(context.Background()))
- defer sh.Close()
+ defer sh.Close(context.Background())
checkAllObjs(false)
checkObj(object.AddressOf(tombObj), nil)
diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go
index b3bc6a30b..8dc1f0522 100644
--- a/pkg/local_object_storage/shard/count.go
+++ b/pkg/local_object_storage/shard/count.go
@@ -23,6 +23,12 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) {
return 0, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+
cc, err := s.metaBase.ObjectCounters()
if err != nil {
return 0, err
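
The acquire-then-defer-release pattern introduced here recurs throughout this PR. One plausible shape for such a limiter, sketched with buffered channels as counting semaphores — the real frostfs-qos implementation is not shown in this diff and likely differs:

```go
package main

import (
	"context"
	"fmt"
)

type ReleaseFunc func()

// opsLimiter bounds concurrent read and write operations with buffered
// channels acting as counting semaphores.
type opsLimiter struct {
	read, write chan struct{}
}

func newOpsLimiter(reads, writes int) *opsLimiter {
	return &opsLimiter{
		read:  make(chan struct{}, reads),
		write: make(chan struct{}, writes),
	}
}

func (l *opsLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	select {
	case l.read <- struct{}{}:
		return func() { <-l.read }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func (l *opsLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
	select {
	case l.write <- struct{}{}:
		return func() { <-l.write }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	l := newOpsLimiter(2, 1)
	release, err := l.ReadRequest(context.Background())
	if err != nil {
		panic(err)
	}
	defer release()
	fmt.Println("read slot acquired")
}
```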
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index c898fdf41..0101817a8 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -55,6 +54,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del
return DeleteRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return DeleteRes{}, err
+ }
+ defer release()
+
result := DeleteRes{}
for _, addr := range prm.addr {
select {
@@ -95,7 +100,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr
}
_, err := s.writeCache.Head(ctx, addr)
if err == nil {
- s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
+ s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
return fmt.Errorf("object %s must be flushed from writecache", addr)
}
if client.IsErrObjectNotFound(err) {
@@ -110,10 +115,9 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
- s.log.Debug(logs.StorageIDRetrievalFailure,
+ s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
zap.Stringer("object", addr),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return err
}
storageID := res.StorageID()
@@ -130,10 +134,9 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
_, err = s.blobStor.Delete(ctx, delPrm)
if err != nil && !client.IsErrObjectNotFound(err) {
- s.log.Debug(logs.ObjectRemovalFailureBlobStor,
+ s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
zap.Stringer("object_address", addr),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return err
}
return nil
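
The deleted trace_id fields suggest the logger now derives the trace ID from the context it receives. A hypothetical wrapper illustrating the idea — the actual logger API in pkg/util/logger is not shown in this diff:

```go
package main

import (
	"context"
	"errors"

	"go.uber.org/zap"
)

type traceKey struct{}

// ctxLogger shows why call sites above could drop their explicit trace_id
// fields: the wrapper derives the ID from the context itself.
type ctxLogger struct{ z *zap.Logger }

func (l *ctxLogger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
	if id, ok := ctx.Value(traceKey{}).(string); ok {
		fields = append(fields, zap.String("trace_id", id))
	}
	l.z.Debug(msg, fields...)
}

func main() {
	log := &ctxLogger{z: zap.NewExample()}
	ctx := context.WithValue(context.Background(), traceKey{}, "abc123")
	log.Debug(ctx, "object removal failure", zap.Error(errors.New("not found")))
}
```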
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index 574250a93..c9ce93bc5 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -37,7 +37,7 @@ func TestShard_Delete_BigObject(t *testing.T) {
func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 784bf293a..2c11b6b01 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -18,7 +18,7 @@ type ExistsPrm struct {
// Exists option to set object checked for existence.
Address oid.Address
// Exists option to set parent object checked for existence.
- ParentAddress oid.Address
+ ECParentAddress oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -53,10 +53,6 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
))
defer span.End()
- var exists bool
- var locked bool
- var err error
-
s.m.RLock()
defer s.m.RUnlock()
@@ -64,7 +60,18 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
return ExistsRes{}, ErrShardDisabled
} else if s.info.EvacuationInProgress {
return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- } else if s.info.Mode.NoMetabase() {
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ExistsRes{}, err
+ }
+ defer release()
+
+ var exists bool
+ var locked bool
+
+ if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
p.Address = prm.Address
@@ -74,7 +81,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
} else {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(prm.Address)
- existsPrm.SetParent(prm.ParentAddress)
+ existsPrm.SetECParent(prm.ECParentAddress)
var res meta.ExistsRes
res, err = s.metaBase.Exists(ctx, existsPrm)
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index d605746e8..a262a52cb 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -6,11 +6,13 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -31,41 +33,14 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
-// Event represents class of external events.
-type Event interface {
- typ() eventType
-}
+type newEpochHandler func(context.Context, uint64)
-type eventType int
-
-const (
- _ eventType = iota
- eventNewEpoch
-)
-
-type newEpoch struct {
- epoch uint64
-}
-
-func (e newEpoch) typ() eventType {
- return eventNewEpoch
-}
-
-// EventNewEpoch returns new epoch event.
-func EventNewEpoch(e uint64) Event {
- return newEpoch{
- epoch: e,
- }
-}
-
-type eventHandler func(context.Context, Event)
-
-type eventHandlers struct {
+type newEpochHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
- handlers []eventHandler
+ handlers []newEpochHandler
}
type gcRunResult struct {
@@ -107,10 +82,10 @@ type gc struct {
remover func(context.Context) gcRunResult
- // eventChan is used only for listening for the new epoch event.
+ // newEpochChan is used only for listening for the new epoch event.
// It is ok to keep it open; we select on context cancellation when writing to it.
- eventChan chan Event
- mEventHandler map[eventType]*eventHandlers
+ newEpochChan chan uint64
+ newEpochHandlers *newEpochHandlers
}
type gcCfg struct {
@@ -131,7 +106,7 @@ type gcCfg struct {
func defaultGCCfg() gcCfg {
return gcCfg{
removerInterval: 10 * time.Second,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
workerPoolInit: func(int) util.WorkerPool {
return nil
},
@@ -140,16 +115,8 @@ func defaultGCCfg() gcCfg {
}
func (gc *gc) init(ctx context.Context) {
- sz := 0
-
- for _, v := range gc.mEventHandler {
- sz += len(v.handlers)
- }
-
- if sz > 0 {
- gc.workerPool = gc.workerPoolInit(sz)
- }
-
+ gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
gc.wg.Add(2)
go gc.tickRemover(ctx)
go gc.listenEvents(ctx)
@@ -161,14 +128,14 @@ func (gc *gc) listenEvents(ctx context.Context) {
for {
select {
case <-gc.stopChannel:
- gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel)
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
return
case <-ctx.Done():
- gc.log.Warn(logs.ShardStopEventListenerByContext)
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
return
- case event, ok := <-gc.eventChan:
+ case event, ok := <-gc.newEpochChan:
if !ok {
- gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel)
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
return
}
@@ -177,43 +144,38 @@ func (gc *gc) listenEvents(ctx context.Context) {
}
}
-func (gc *gc) handleEvent(ctx context.Context, event Event) {
- v, ok := gc.mEventHandler[event.typ()]
- if !ok {
- return
- }
-
- v.cancelFunc()
- v.prevGroup.Wait()
+func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
+ gc.newEpochHandlers.cancelFunc()
+ gc.newEpochHandlers.prevGroup.Wait()
var runCtx context.Context
- runCtx, v.cancelFunc = context.WithCancel(ctx)
+ runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
- v.prevGroup.Add(len(v.handlers))
+ gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
- for i := range v.handlers {
+ for i := range gc.newEpochHandlers.handlers {
select {
case <-ctx.Done():
return
default:
}
- h := v.handlers[i]
+ h := gc.newEpochHandlers.handlers[i]
err := gc.workerPool.Submit(func() {
- defer v.prevGroup.Done()
- h(runCtx, event)
+ defer gc.newEpochHandlers.prevGroup.Done()
+ h(runCtx, epoch)
})
if err != nil {
- gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
- zap.String("error", err.Error()),
+ gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
+ zap.Error(err),
)
- v.prevGroup.Done()
+ gc.newEpochHandlers.prevGroup.Done()
}
}
}
-func (gc *gc) releaseResources() {
+func (gc *gc) releaseResources(ctx context.Context) {
if gc.workerPool != nil {
gc.workerPool.Release()
}
@@ -222,7 +184,7 @@ func (gc *gc) releaseResources() {
// because it is possible that we close it earlier than we stop writing to it.
// It is ok to keep it open.
- gc.log.Debug(logs.ShardGCIsStopped)
+ gc.log.Debug(ctx, logs.ShardGCIsStopped)
}
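
handleEvent above serializes epochs: it cancels the previous epoch's still-running handlers, waits for them, then launches the new batch under a fresh cancelable context. A compact sketch of that cancel-then-restart pattern, with plain goroutines standing in for the worker pool:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

type epochHandlers struct {
	prevGroup  sync.WaitGroup
	cancelFunc context.CancelFunc
	handlers   []func(context.Context, uint64)
}

// handle cancels the previous epoch's handlers, waits for them to finish,
// then starts the new batch under a fresh cancelable context.
func (h *epochHandlers) handle(ctx context.Context, epoch uint64) {
	h.cancelFunc()
	h.prevGroup.Wait()

	var runCtx context.Context
	runCtx, h.cancelFunc = context.WithCancel(ctx)

	h.prevGroup.Add(len(h.handlers))
	for _, fn := range h.handlers {
		go func() {
			defer h.prevGroup.Done()
			fn(runCtx, epoch)
		}()
	}
}

func main() {
	h := &epochHandlers{
		cancelFunc: func() {},
		handlers: []func(context.Context, uint64){
			func(_ context.Context, e uint64) { fmt.Println("epoch", e) },
		},
	}
	h.handle(context.Background(), 105)
	h.prevGroup.Wait()
}
```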
func (gc *gc) tickRemover(ctx context.Context) {
@@ -236,10 +198,10 @@ func (gc *gc) tickRemover(ctx context.Context) {
case <-ctx.Done():
// The context may be canceled before we start to close shards.
// It makes sense to stop collecting garbage on context cancellation too.
- gc.releaseResources()
+ gc.releaseResources(ctx)
return
case <-gc.stopChannel:
- gc.releaseResources()
+ gc.releaseResources(ctx)
return
case <-timer.C:
startedAt := time.Now()
@@ -258,13 +220,16 @@ func (gc *gc) tickRemover(ctx context.Context) {
}
}
-func (gc *gc) stop() {
+func (gc *gc) stop(ctx context.Context) {
gc.onceStop.Do(func() {
close(gc.stopChannel)
})
- gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
+ gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
+
+ gc.newEpochHandlers.cancelFunc()
+ gc.newEpochHandlers.prevGroup.Wait()
}
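
gc.stop pairs sync.Once with a closed channel so repeated calls are safe, then waits on the WaitGroup before draining the epoch handlers. A minimal sketch of the idempotent-stop half:

```go
package main

import (
	"fmt"
	"sync"
)

type gc struct {
	onceStop    sync.Once
	stopChannel chan struct{}
	wg          sync.WaitGroup
}

// stop is idempotent: it closes the stop channel exactly once, then waits
// for the listener and remover goroutines tracked by wg to exit.
func (g *gc) stop() {
	g.onceStop.Do(func() {
		close(g.stopChannel)
	})
	g.wg.Wait()
}

func main() {
	g := &gc{stopChannel: make(chan struct{})}
	g.wg.Add(1)
	go func() {
		defer g.wg.Done()
		<-g.stopChannel
	}()
	g.stop()
	g.stop() // safe: the channel is closed only once
	fmt.Println("stopped")
}
```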
// iterates over metabase and deletes objects
@@ -286,8 +251,47 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
return
}
- s.log.Debug(logs.ShardGCRemoveGarbageStarted)
- defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted)
+ s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
+ defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
+
+ buf, err := s.getGarbage(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
+ zap.Error(err),
+ )
+
+ return
+ } else if len(buf) == 0 {
+ result.success = true
+ return
+ }
+
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(buf...)
+
+ // delete accumulated objects
+ res, err := s.delete(ctx, deletePrm, true)
+
+ result.deleted = res.deleted
+ result.failedToDelete = uint64(len(buf)) - res.deleted
+ result.success = true
+
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
+ zap.Error(err),
+ )
+ result.success = false
+ }
+
+ return
+}
+
+func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
buf := make([]oid.Address, 0, s.rmBatchSize)
@@ -308,47 +312,20 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
return nil
})
- // iterate over metabase's objects with GC mark
- // (no more than s.rmBatchSize objects)
- err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
- if err != nil {
- s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
- zap.String("error", err.Error()),
- )
-
- return
- } else if len(buf) == 0 {
- result.success = true
- return
+ if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil {
+ return nil, err
}
- var deletePrm DeletePrm
- deletePrm.SetAddresses(buf...)
-
- // delete accumulated objects
- res, err := s.delete(ctx, deletePrm, true)
-
- result.deleted = res.deleted
- result.failedToDelete = uint64(len(buf)) - res.deleted
- result.success = true
-
- if err != nil {
- s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
- zap.String("error", err.Error()),
- )
- result.success = false
- }
-
- return
+ return buf, nil
}
func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
- workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
- batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
+ workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
+ batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
return
}
-func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
+func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
var err error
startedAt := time.Now()
@@ -356,8 +333,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
}()
- s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
- defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -366,7 +343,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
batch = append(batch, o.Address())
@@ -396,7 +373,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
}
}
@@ -414,24 +391,25 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
return
}
+ s.handleExpiredObjectsUnsafe(ctx, expired)
+}
+
+func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
expired, err := s.getExpiredWithLinked(ctx, expired)
if err != nil {
- s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
+ s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
return
}
- var inhumePrm meta.InhumePrm
-
- inhumePrm.SetAddresses(expired...)
- inhumePrm.SetGCMark()
-
- // inhume the collected objects
- res, err := s.metaBase.Inhume(ctx, inhumePrm)
+ res, err := s.inhumeGC(ctx, expired)
if err != nil {
- s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err))
return
}
@@ -449,6 +427,12 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
}
func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+
result := make([]oid.Address, 0, len(source))
parentToChildren, err := s.metaBase.GetChildren(ctx, source)
if err != nil {
@@ -462,7 +446,20 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address)
return result, nil
}
-func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
+func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) {
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return meta.InhumeRes{}, err
+ }
+ defer release()
+
+ var inhumePrm meta.InhumePrm
+ inhumePrm.SetAddresses(addrs...)
+ inhumePrm.SetGCMark()
+ return s.metaBase.Inhume(ctx, inhumePrm)
+}
+
+func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
var err error
startedAt := time.Now()
@@ -470,11 +467,10 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
}()
- epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
- log.Debug(logs.ShardStartedExpiredTombstonesHandling)
- defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
+ log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
+ defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -492,22 +488,29 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
})
for {
- log.Debug(logs.ShardIteratingTombstones)
+ log.Debug(ctx, logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
- s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
+ s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
}
- err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
+ var release qos.ReleaseFunc
+ release, err = s.opsLimiter.ReadRequest(ctx)
if err != nil {
- log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ s.m.RUnlock()
+ return
+ }
+ err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
+ release()
+ if err != nil {
+ log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
-
return
}
@@ -524,7 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
}
}
- log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+ log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
if len(tssExp) > 0 {
s.expiredTombstonesCallback(ctx, tssExp)
}
@@ -535,7 +538,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
}
}
-func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
+func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
var err error
startedAt := time.Now()
@@ -543,8 +546,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
}()
- s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
- defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -554,14 +557,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
if o.Type() == objectSDK.TypeLock {
batch = append(batch, o.Address())
if len(batch) == batchSize {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
+ s.expiredLocksCallback(egCtx, epoch, expired)
return egCtx.Err()
})
batch = make([]oid.Address, 0, batchSize)
@@ -575,7 +578,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
if len(batch) > 0 {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
+ s.expiredLocksCallback(egCtx, epoch, expired)
return egCtx.Err()
})
}
@@ -584,7 +587,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
}
}
@@ -596,7 +599,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
return ErrDegradedMode
}
- err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
select {
case <-ctx.Done():
return meta.ErrInterruptIterator
@@ -612,12 +621,11 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
}
func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return nil, ErrDegradedMode
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
}
+ defer release()
return s.metaBase.FilterExpired(ctx, epoch, addresses)
}
@@ -627,28 +635,22 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
//
// Does not modify tss.
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
- if s.GetMode().NoMetabase() {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
return
}
- // Mark tombstones as garbage.
- var pInhume meta.InhumePrm
-
- tsAddrs := make([]oid.Address, 0, len(tss))
- for _, ts := range tss {
- tsAddrs = append(tsAddrs, ts.Tombstone())
- }
-
- pInhume.SetGCMark()
- pInhume.SetAddresses(tsAddrs...)
-
- // inhume tombstones
- res, err := s.metaBase.Inhume(ctx, pInhume)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
+ return
+ }
+ res, err := s.metaBase.InhumeTombstones(ctx, tss)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
return
}
@@ -663,26 +665,27 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size))
i++
}
-
- // drop just processed expired tombstones
- // from graveyard
- err = s.metaBase.DropGraves(ctx, tss)
- if err != nil {
- s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
- }
}
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- if s.GetMode().NoMetabase() {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return
+ }
+
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
unlocked, err := s.metaBase.FreeLockedBy(lockers)
+ release()
if err != nil {
- s.log.Warn(logs.ShardFailureToUnlockObjects,
- zap.String("error", err.Error()),
- )
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
@@ -690,13 +693,15 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
var pInhume meta.InhumePrm
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
-
- res, err := s.metaBase.Inhume(ctx, pInhume)
+ release, err = s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
+ return
+ }
+ res, err := s.metaBase.Inhume(ctx, pInhume)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
return
}
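
Note the explicit release() calls here instead of defer: HandleExpiredLocks acquires the write limiter twice in sequence, so each slot is held only for its own metabase call. A sketch of that discipline with hypothetical acquire/op helpers:

```go
package main

import (
	"context"
	"fmt"
)

type releaseFunc func()

func acquire(ctx context.Context) (releaseFunc, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return func() {}, nil
}

func firstOp() error  { return nil }
func secondOp() error { return nil }

// Releasing right after each guarded call (instead of defer) keeps a
// limiter slot held only for the call itself, which matters when one
// function performs several guarded operations in sequence.
func twoPhase(ctx context.Context) error {
	release, err := acquire(ctx)
	if err != nil {
		return err
	}
	err = firstOp()
	release()
	if err != nil {
		return err
	}

	release, err = acquire(ctx)
	if err != nil {
		return err
	}
	err = secondOp()
	release()
	return err
}

func main() {
	if err := twoPhase(context.Background()); err != nil {
		fmt.Println(err)
	}
}
```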
@@ -718,7 +723,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
if err != nil {
- s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+ s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
return
}
@@ -726,47 +731,57 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
return
}
- s.handleExpiredObjects(ctx, expiredUnlocked)
+ s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked)
}
// HandleDeletedLocks unlocks all objects which were locked by lockers.
-func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
- if s.GetMode().NoMetabase() {
+func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
return
}
- _, err := s.metaBase.FreeLockedBy(lockers)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardFailureToUnlockObjects,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ return
+ }
+ _, err = s.metaBase.FreeLockedBy(lockers)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
}
-// NotificationChannel returns channel for shard events.
-func (s *Shard) NotificationChannel() chan<- Event {
- return s.gc.eventChan
+// NotificationChannel returns the channel for new epoch events.
+func (s *Shard) NotificationChannel() chan<- uint64 {
+ return s.gc.newEpochChan
}
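
With events reduced to plain epoch numbers, producers simply send a uint64 into the channel. A trivial usage sketch with a hypothetical producer:

```go
package main

import "fmt"

// After the refactor, epoch notifications are plain uint64 values instead
// of an Event interface, so a producer just sends the epoch number.
func notify(ch chan<- uint64, epoch uint64) {
	ch <- epoch
}

func main() {
	newEpochChan := make(chan uint64, 1)
	notify(newEpochChan, 105)
	fmt.Println("queued epoch", <-newEpochChan)
}
```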
-func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
+func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
defer span.End()
- epoch := e.(newEpoch).epoch
-
- s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
s.collectExpiredContainerSizeMetrics(ctx, epoch)
s.collectExpiredContainerCountMetrics(ctx, epoch)
}
func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
- ids, err := s.metaBase.ZeroSizeContainers(ctx)
+ release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ ids, err := s.metaBase.ZeroSizeContainers(ctx)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
@@ -776,9 +791,15 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui
}
func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
- ids, err := s.metaBase.ZeroCountContainers(ctx)
+ release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ ids, err := s.metaBase.ZeroCountContainers(ctx)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 11db5e54e..54d2f1510 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -37,7 +37,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -61,8 +62,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
meta.WithEpochState(epochState{}),
),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
@@ -79,7 +80,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
sh = New(opts...)
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 2b97111e7..f512a488a 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -34,7 +34,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
require.NoError(t, err)
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
+ sh.gc.handleEvent(context.Background(), epoch.Value)
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
@@ -131,7 +131,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
@@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
+ sh.gc.handleEvent(context.Background(), epoch.Value)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")
@@ -190,7 +190,7 @@ func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
obj := testutil.GenerateObjectWithSize(1024)
@@ -254,7 +254,7 @@ func TestGCDontDeleteObjectFromWritecache(t *testing.T) {
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
obj := testutil.GenerateObjectWithSize(1024)
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index d1c393613..28f8912be 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -10,7 +10,6 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -112,6 +111,12 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return c.Get(ctx, prm.addr)
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return GetRes{}, err
+ }
+ defer release()
+
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
@@ -144,7 +149,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
} else {
- s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
+ s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@@ -153,16 +158,14 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return res, false, err
}
if client.IsErrObjectNotFound(err) {
- s.log.Debug(logs.ShardObjectIsMissingInWritecache,
+ s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Bool("skip_meta", skipMeta))
} else {
- s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
+ s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Bool("skip_meta", skipMeta))
}
}
if skipMeta || mErr != nil {
@@ -175,7 +178,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
- return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
+ return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
}
storageID := mExRes.StorageID()
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index d0eecf74e..837991b73 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -30,7 +30,7 @@ func TestShard_Get(t *testing.T) {
func testShardGet(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
var putPrm PutPrm
var getPrm GetPrm
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index ff57e3bf9..34b8290d6 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -81,6 +81,12 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
+ release, limitErr := s.opsLimiter.ReadRequest(ctx)
+ if limitErr != nil {
+ return HeadRes{}, limitErr
+ }
+ defer release()
+
var res meta.GetRes
res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index c65bbb1e3..deb3019df 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -28,7 +28,7 @@ func TestShard_Head(t *testing.T) {
func testShardHead(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
var putPrm PutPrm
var headPrm HeadPrm
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index a72313498..7391adef2 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -1,11 +1,11 @@
package shard
import (
+ "context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/mr-tron/base58"
"go.uber.org/zap"
)
@@ -31,12 +31,12 @@ func (s *Shard) ID() *ID {
}
// UpdateID reads shard ID saved in the metabase and updates it if it is missing.
-func (s *Shard) UpdateID() (err error) {
+func (s *Shard) UpdateID(ctx context.Context) (err error) {
var idFromMetabase []byte
modeDegraded := s.GetMode().NoMetabase()
if !modeDegraded {
- if idFromMetabase, err = s.metaBase.GetShardID(mode.ReadOnly); err != nil {
- err = fmt.Errorf("failed to read shard id from metabase: %w", err)
+ if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
+ err = fmt.Errorf("read shard id from metabase: %w", err)
}
}
@@ -45,12 +45,12 @@ func (s *Shard) UpdateID() (err error) {
}
shardID := s.info.ID.String()
- s.cfg.metricsWriter.SetShardID(shardID)
+ s.metricsWriter.SetShardID(shardID)
if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
s.writeCache.GetMetrics().SetShardID(shardID)
}
- s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))}
+ s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
s.metaBase.SetLogger(s.log)
s.blobStor.SetLogger(s.log)
if s.hasWriteCache() {
@@ -61,10 +61,11 @@ func (s *Shard) UpdateID() (err error) {
if s.pilorama != nil {
s.pilorama.SetParentID(s.info.ID.String())
}
+ s.opsLimiter.SetParentID(s.info.ID.String())
if len(idFromMetabase) == 0 && !modeDegraded {
- if setErr := s.metaBase.SetShardID(*s.info.ID, s.GetMode()); setErr != nil {
- err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr))
+ if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
+ err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
}
}
return
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 746177c3a..c0fd65f4b 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -82,6 +81,12 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return InhumeRes{}, err
+ }
+ defer release()
+
if s.hasWriteCache() {
for i := range prm.target {
_ = s.writeCache.Delete(ctx, prm.target[i])
@@ -109,9 +114,8 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrLockObjectRemoval
}
- s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+ s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+ zap.Error(err),
)
s.m.RUnlock()
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 1353d5d94..1421f0e18 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -27,7 +27,7 @@ func TestShard_Inhume(t *testing.T) {
func testShardInhume(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 8d09974b8..af87981ca 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -107,9 +106,15 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
return SelectRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return SelectRes{}, err
+ }
+ defer release()
+
lst, err := s.metaBase.Containers(ctx)
if err != nil {
- return res, fmt.Errorf("can't list stored containers: %w", err)
+ return res, fmt.Errorf("list stored containers: %w", err)
}
filters := objectSDK.NewSearchFilters()
@@ -122,10 +127,9 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
if err != nil {
- s.log.Debug(logs.ShardCantSelectAllObjects,
+ s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
continue
}
@@ -147,9 +151,15 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
return ListContainersRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ListContainersRes{}, err
+ }
+ defer release()
+
containers, err := s.metaBase.Containers(ctx)
if err != nil {
- return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
+ return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
}
return ListContainersRes{
@@ -175,12 +185,18 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
return ListWithCursorRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ListWithCursorRes{}, err
+ }
+ defer release()
+
var metaPrm meta.ListPrm
metaPrm.SetCount(prm.count)
metaPrm.SetCursor(prm.cursor)
res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
if err != nil {
- return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
+ return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
}
return ListWithCursorRes{
@@ -204,11 +220,17 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai
return ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
var metaPrm meta.IterateOverContainersPrm
metaPrm.Handler = prm.Handler
- err := s.metaBase.IterateOverContainers(ctx, metaPrm)
+ err = s.metaBase.IterateOverContainers(ctx, metaPrm)
if err != nil {
- return fmt.Errorf("could not iterate over containers: %w", err)
+ return fmt.Errorf("iterate over containers: %w", err)
}
return nil
@@ -229,13 +251,19 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
return ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
var metaPrm meta.IterateOverObjectsInContainerPrm
metaPrm.ContainerID = prm.ContainerID
metaPrm.ObjectType = prm.ObjectType
metaPrm.Handler = prm.Handler
- err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
+ err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
if err != nil {
- return fmt.Errorf("could not iterate over objects: %w", err)
+ return fmt.Errorf("iterate over objects: %w", err)
}
return nil
@@ -253,12 +281,18 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive
return 0, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+
var metaPrm meta.CountAliveObjectsInContainerPrm
metaPrm.ObjectType = prm.ObjectType
metaPrm.ContainerID = prm.ContainerID
count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
if err != nil {
- return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
+ return 0, fmt.Errorf("count alive objects in bucket: %w", err)
}
return count, nil
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 3414dc76a..139b2e316 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -18,14 +18,14 @@ func TestShard_List(t *testing.T) {
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
testShardList(t, sh)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
shWC := newShard(t, true)
- defer func() { require.NoError(t, shWC.Close()) }()
+ defer func() { require.NoError(t, shWC.Close(context.Background())) }()
testShardList(t, shWC)
})
}
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index 4a8d89d63..9c392fdac 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -38,7 +38,13 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []
return ErrDegradedMode
}
- err := s.metaBase.Lock(ctx, idCnr, locker, locked)
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ err = s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -61,6 +67,12 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return false, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer release()
+
var prm meta.IsLockedPrm
prm.SetAddress(addr)
@@ -72,10 +84,10 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return res.Locked(), nil
}
-// GetLocked return lock id's of the provided object. Not found object is
+// GetLocks returns the lock IDs of the provided object. Not found object is
// considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
-func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocked",
+func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", addr.EncodeToString()),
@@ -86,5 +98,12 @@ func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, erro
if m.NoMetabase() {
return nil, ErrDegradedMode
}
- return s.metaBase.GetLocked(ctx, addr)
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+
+ return s.metaBase.GetLocks(ctx, addr)
}
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 9ce95feb1..3878a65cd 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -28,9 +28,10 @@ func TestShard_Lock(t *testing.T) {
var sh *Shard
rootPath := t.TempDir()
+ l := logger.NewLoggerWrapper(zap.NewNop())
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
- WithLogger(&logger.Logger{Logger: zap.NewNop()}),
+ WithLogger(l),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -53,8 +54,8 @@ func TestShard_Lock(t *testing.T) {
meta.WithPath(filepath.Join(rootPath, "meta")),
meta.WithEpochState(epochState{}),
),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
}
@@ -62,7 +63,7 @@ func TestShard_Lock(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
@@ -148,7 +149,7 @@ func TestShard_Lock(t *testing.T) {
func TestShard_IsLocked(t *testing.T) {
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index cec5a12ad..5230dcad0 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -201,11 +201,11 @@ func TestCounters(t *testing.T) {
dir := t.TempDir()
sh, mm := shardWithMetrics(t, dir)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
- sh.SetMode(mode.ReadOnly)
+ sh.SetMode(context.Background(), mode.ReadOnly)
require.Equal(t, mode.ReadOnly, mm.mode)
- sh.SetMode(mode.ReadWrite)
+ sh.SetMode(context.Background(), mode.ReadWrite)
require.Equal(t, mode.ReadWrite, mm.mode)
const objNumber = 10
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index d90a5f4b6..901528976 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,6 +1,8 @@
package shard
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -18,19 +20,21 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode")
//
// Returns any error encountered that did not allow
// setting shard mode.
-func (s *Shard) SetMode(m mode.Mode) error {
+func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error {
unlock := s.lockExclusive()
defer unlock()
- return s.setMode(m)
+ return s.setMode(ctx, m)
}
-func (s *Shard) setMode(m mode.Mode) error {
- s.log.Info(logs.ShardSettingShardMode,
+func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
+ s.log.Info(ctx, logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
- components := []interface{ SetMode(mode.Mode) error }{
+ components := []interface {
+ SetMode(context.Context, mode.Mode) error
+ }{
s.metaBase, s.blobStor,
}
@@ -58,7 +62,7 @@ func (s *Shard) setMode(m mode.Mode) error {
if !m.Disabled() {
for i := range components {
- if err := components[i].SetMode(m); err != nil {
+ if err := components[i].SetMode(ctx, m); err != nil {
return err
}
}
@@ -67,7 +71,7 @@ func (s *Shard) setMode(m mode.Mode) error {
s.info.Mode = m
s.metricsWriter.SetMode(s.info.Mode)
- s.log.Info(logs.ShardShardModeSetSuccessfully,
+ s.log.Info(ctx, logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}
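setMode now gathers components behind an anonymous context-aware interface, so any value with a matching SetMode method satisfies it. A hypothetical minimal implementer (the real ones are the metabase and blobstor):

```go
// Hypothetical component satisfying the anonymous interface assembled
// in setMode above.
type noopComponent struct{ current mode.Mode }

func (c *noopComponent) SetMode(ctx context.Context, m mode.Mode) error {
	if err := ctx.Err(); err != nil {
		return err // honor cancellation before switching modes
	}
	c.current = m
	return nil
}
```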
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index 24cc75154..f8cb00a31 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -67,6 +67,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var res common.PutRes
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return PutRes{}, err
+ }
+ defer release()
+
// existence checks are not performed here, these checks should be executed
// ahead of `Put` by the storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
@@ -75,13 +81,13 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
}
if err != nil || !tryCache {
if err != nil {
- s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
- zap.String("err", err.Error()))
+ s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+ zap.Error(err))
}
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
- return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
+ return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
}
}
@@ -94,7 +100,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
if err != nil {
// maybe we need to handle this case in a special way
// since the object has been successfully written to BlobStor
- return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
+ return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
}
if res.Inserted {
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 701268820..443689104 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -131,6 +131,12 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
return obj, nil
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return RngRes{}, err
+ }
+ defer release()
+
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index cc73db316..06fe9f511 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -79,7 +79,8 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -94,7 +95,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
}),
},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 0d83caa0c..20f1f2b6f 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -6,10 +6,13 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -18,37 +21,9 @@ import (
var ErrRebuildInProgress = errors.New("shard rebuild in progress")
-type RebuildWorkerLimiter interface {
- AcquireWorkSlot(ctx context.Context) error
- ReleaseWorkSlot()
-}
-
-type rebuildLimiter struct {
- semaphore chan struct{}
-}
-
-func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter {
- return &rebuildLimiter{
- semaphore: make(chan struct{}, workersCount),
- }
-}
-
-func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
- select {
- case l.semaphore <- struct{}{}:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func (l *rebuildLimiter) ReleaseWorkSlot() {
- <-l.semaphore
-}
-
type rebuildTask struct {
- limiter RebuildWorkerLimiter
- fillPercent int
+ concurrencyLimiter common.RebuildLimiter
+ fillPercent int
}
type rebuilder struct {
@@ -88,36 +63,37 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D
if !ok {
continue
}
- runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter)
+ runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter)
}
}
}()
}
func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
- fillPercent int, limiter RebuildWorkerLimiter,
+ fillPercent int, concLimiter common.RebuildLimiter,
) {
select {
case <-ctx.Done():
return
default:
}
- log.Info(logs.BlobstoreRebuildStarted)
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
- log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
+ log.Info(ctx, logs.BlobstoreRebuildStarted)
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil {
+ log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
} else {
- log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
+ log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
}
}
-func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int,
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int,
) error {
select {
case <-ctx.Done():
return ctx.Err()
case r.tasks <- rebuildTask{
- limiter: limiter,
- fillPercent: fillPercent,
+ concurrencyLimiter: limiter,
+ fillPercent: fillPercent,
}:
return nil
default:
@@ -125,7 +101,7 @@ func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLi
}
}
-func (r *rebuilder) Stop(log *logger.Logger) {
+func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -138,7 +114,7 @@ func (r *rebuilder) Stop(log *logger.Logger) {
r.wg.Wait()
r.cancel = nil
r.done = nil
- log.Info(logs.BlobstoreRebuildStopped)
+ log.Info(ctx, logs.BlobstoreRebuildStopped)
}
var errMBIsNotAvailable = errors.New("metabase is not available")
@@ -166,7 +142,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres
}
type RebuildPrm struct {
- ConcurrencyLimiter RebuildWorkerLimiter
+ ConcurrencyLimiter common.ConcurrencyLimiter
TargetFillPercent uint32
}
@@ -188,5 +164,30 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
return ErrDegradedMode
}
- return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent))
+ limiter := &rebuildLimiter{
+ concurrencyLimiter: p.ConcurrencyLimiter,
+ rateLimiter: s.opsLimiter,
+ }
+ return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent))
+}
+
+var _ common.RebuildLimiter = (*rebuildLimiter)(nil)
+
+type rebuildLimiter struct {
+ concurrencyLimiter common.ConcurrencyLimiter
+ rateLimiter qos.Limiter
+}
+
+func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
+ return r.concurrencyLimiter.AcquireWorkSlot(ctx)
+}
+
+func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) {
+ release, err := r.rateLimiter.ReadRequest(ctx)
+ return common.ReleaseFunc(release), err
+}
+
+func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) {
+ release, err := r.rateLimiter.WriteRequest(ctx)
+ return common.ReleaseFunc(release), err
}
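The dedicated semaphore type is gone; common.RebuildLimiter now bundles the work-slot semaphore with the shard's rate limiter. A sketch of how a rebuild worker would consume the combined limiter, assuming the ReleaseFunc semantics shown above:

```go
// Sketch: one work slot bounds concurrency for the whole unit of work,
// while each I/O operation inside it additionally takes a rate quota.
func rebuildOne(ctx context.Context, lim common.RebuildLimiter, work func() error) error {
	releaseSlot, err := lim.AcquireWorkSlot(ctx)
	if err != nil {
		return err
	}
	defer releaseSlot()

	releaseIO, err := lim.WriteRequest(ctx)
	if err != nil {
		return err
	}
	defer releaseIO()

	return work()
}
```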
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
index 0025bb45a..d90343265 100644
--- a/pkg/local_object_storage/shard/refill_test.go
+++ b/pkg/local_object_storage/shard/refill_test.go
@@ -34,7 +34,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)},
})
- defer func() { require.NoError(b, sh.Close()) }()
+ defer func() { require.NoError(b, sh.Close(context.Background())) }()
var putPrm PutPrm
@@ -61,7 +61,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, err)
}
- require.NoError(b, sh.Close())
+ require.NoError(b, sh.Close(context.Background()))
require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path))
require.NoError(b, sh.Open(context.Background()))
@@ -72,5 +72,5 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, sh.Init(context.Background()))
- require.NoError(b, sh.Close())
+ require.NoError(b, sh.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index 7dd7189bb..e563f390b 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -59,7 +59,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, sh.Init(context.Background()))
defer func() {
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
}()
objects := make([]objAddr, 5)
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index 184ca9b71..fbc751e26 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -60,6 +60,12 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
return SelectRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return SelectRes{}, err
+ }
+ defer release()
+
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
@@ -67,7 +73,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
- return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
+ return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
}
return SelectRes{
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 413bfd2f7..f21541d9d 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -7,6 +7,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -38,6 +39,8 @@ type Shard struct {
rb *rebuilder
+ opsLimiter *atomicOpsLimiter
+
gcCancel atomic.Value
setModeRequested atomic.Bool
writecacheSealCancel atomic.Pointer[writecacheSealCanceler]
@@ -95,20 +98,23 @@ type cfg struct {
metricsWriter MetricsWriter
- reportErrorFunc func(selfID string, message string, err error)
+ reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
containerInfo container.InfoProvider
+
+ configOpsLimiter qos.Limiter
}
func defaultCfg() *cfg {
return &cfg{
rmBatchSize: 100,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
gcCfg: defaultGCCfg(),
- reportErrorFunc: func(string, string, error) {},
+ reportErrorFunc: func(context.Context, string, string, error) {},
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
metricsWriter: noopMetrics{},
+ configOpsLimiter: qos.NewNoopLimiter(),
}
}
@@ -124,14 +130,15 @@ func New(opts ...Option) *Shard {
mb := meta.New(c.metaOpts...)
s := &Shard{
- cfg: c,
- blobStor: bs,
- metaBase: mb,
- tsSource: c.tsSource,
+ cfg: c,
+ blobStor: bs,
+ metaBase: mb,
+ tsSource: c.tsSource,
+ opsLimiter: newAtomicOpsLimiter(c.configOpsLimiter),
}
- reportFunc := func(msg string, err error) {
- s.reportErrorFunc(s.ID().String(), msg, err)
+ reportFunc := func(ctx context.Context, msg string, err error) {
+ s.reportErrorFunc(ctx, s.ID().String(), msg, err)
}
s.blobStor.SetReportErrorFunc(reportFunc)
@@ -141,7 +148,8 @@ func New(opts ...Option) *Shard {
append(c.writeCacheOpts,
writecache.WithReportErrorFunc(reportFunc),
writecache.WithBlobstor(bs),
- writecache.WithMetabase(mb))...)
+ writecache.WithMetabase(mb),
+ writecache.WithQoSLimiter(s.opsLimiter))...)
s.writeCache.GetMetrics().SetPath(s.writeCache.DumpInfo().Path)
}
@@ -201,7 +209,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option {
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
- c.gcCfg.log = l
+ c.gcCfg.log = l.WithTag(logger.TagGC)
}
}
@@ -214,7 +222,7 @@ func WithWriteCache(use bool) Option {
// hasWriteCache reports whether the write-cache is enabled on the shard.
func (s *Shard) hasWriteCache() bool {
- return s.cfg.useWriteCache
+ return s.useWriteCache
}
// NeedRefillMetabase returns true if the metabase needs to be refilled.
@@ -317,7 +325,7 @@ func WithGCMetrics(v GCMectrics) Option {
// WithReportErrorFunc returns option to specify callback for handling storage-related errors
// in the background workers.
-func WithReportErrorFunc(f func(selfID string, message string, err error)) Option {
+func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
return func(c *cfg) {
c.reportErrorFunc = f
}
@@ -368,16 +376,22 @@ func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
}
}
-func (s *Shard) fillInfo() {
- s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
- s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
- s.cfg.info.Mode = s.GetMode()
+func WithLimiter(l qos.Limiter) Option {
+ return func(c *cfg) {
+ c.configOpsLimiter = l
+ }
+}
- if s.cfg.useWriteCache {
- s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
+func (s *Shard) fillInfo() {
+ s.info.MetaBaseInfo = s.metaBase.DumpInfo()
+ s.info.BlobStorInfo = s.blobStor.DumpInfo()
+ s.info.Mode = s.GetMode()
+
+ if s.useWriteCache {
+ s.info.WriteCacheInfo = s.writeCache.DumpInfo()
}
if s.pilorama != nil {
- s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
+ s.info.PiloramaInfo = s.pilorama.DumpInfo()
}
}
@@ -401,7 +415,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
cc, err := s.metaBase.ObjectCounters()
if err != nil {
- s.log.Warn(logs.ShardMetaObjectCounterRead,
+ s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
zap.Error(err),
)
@@ -414,7 +428,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
cnrList, err := s.metaBase.Containers(ctx)
if err != nil {
- s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
+ s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
return
}
@@ -423,7 +437,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
for i := range cnrList {
size, err := s.metaBase.ContainerSize(cnrList[i])
if err != nil {
- s.log.Warn(logs.ShardMetaCantReadContainerSize,
+ s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
zap.String("cid", cnrList[i].EncodeToString()),
zap.Error(err))
continue
@@ -436,7 +450,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
contCount, err := s.metaBase.ContainerCounters(ctx)
if err != nil {
- s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err))
+ s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
return
}
for contID, count := range contCount.Counts {
@@ -444,57 +458,57 @@ func (s *Shard) updateMetrics(ctx context.Context) {
s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
}
- s.cfg.metricsWriter.SetMode(s.info.Mode)
+ s.metricsWriter.SetMode(s.info.Mode)
}
// incObjectCounter increment both physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
- s.cfg.metricsWriter.IncObjectCounter(physical)
- s.cfg.metricsWriter.IncObjectCounter(logical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+ s.metricsWriter.IncObjectCounter(physical)
+ s.metricsWriter.IncObjectCounter(logical)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
if isUser {
- s.cfg.metricsWriter.IncObjectCounter(user)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
+ s.metricsWriter.IncObjectCounter(user)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
if v > 0 {
- s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
+ s.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) setObjectCounterBy(typ string, v uint64) {
if v > 0 {
- s.cfg.metricsWriter.SetObjectCounter(typ, v)
+ s.metricsWriter.SetObjectCounter(typ, v)
}
}
func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
for cnrID, count := range byCnr {
if count.Phy > 0 {
- s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
}
if count.Logic > 0 {
- s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
}
if count.User > 0 {
- s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
}
}
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
if size != 0 {
- s.cfg.metricsWriter.AddToContainerSize(cnr, size)
+ s.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
if size != 0 {
- s.cfg.metricsWriter.AddToPayloadSize(size)
+ s.metricsWriter.AddToPayloadSize(size)
}
}
@@ -510,3 +524,39 @@ func (s *Shard) SetEvacuationInProgress(val bool) {
s.info.EvacuationInProgress = val
s.metricsWriter.SetEvacuationInProgress(val)
}
+
+var _ qos.Limiter = &atomicOpsLimiter{}
+
+func newAtomicOpsLimiter(l qos.Limiter) *atomicOpsLimiter {
+ result := &atomicOpsLimiter{}
+ result.ptr.Store(&qosLimiterHolder{Limiter: l})
+ return result
+}
+
+type atomicOpsLimiter struct {
+ ptr atomic.Pointer[qosLimiterHolder]
+}
+
+func (a *atomicOpsLimiter) Close() {
+ a.ptr.Load().Close()
+}
+
+func (a *atomicOpsLimiter) ReadRequest(ctx context.Context) (qos.ReleaseFunc, error) {
+ return a.ptr.Load().ReadRequest(ctx)
+}
+
+func (a *atomicOpsLimiter) SetMetrics(m qos.Metrics) {
+ a.ptr.Load().SetMetrics(m)
+}
+
+func (a *atomicOpsLimiter) SetParentID(id string) {
+ a.ptr.Load().SetParentID(id)
+}
+
+func (a *atomicOpsLimiter) WriteRequest(ctx context.Context) (qos.ReleaseFunc, error) {
+ return a.ptr.Load().WriteRequest(ctx)
+}
+
+type qosLimiterHolder struct {
+ qos.Limiter
+}
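atomicOpsLimiter stores the limiter behind an atomic.Pointer, which suggests it can be swapped at runtime (e.g. on config reload) without pausing in-flight requests. A hypothetical swap helper, not part of this diff:

```go
// Hypothetical: replace the active limiter atomically and close the old
// one. The qosLimiterHolder indirection keeps the interface value behind
// a single pointer so Store/Swap remain atomic operations.
func (a *atomicOpsLimiter) set(l qos.Limiter) {
	old := a.ptr.Swap(&qosLimiterHolder{Limiter: l})
	if old != nil {
		old.Close()
	}
}
```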
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index 73ba2e82b..84be71c4d 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -60,7 +60,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -89,8 +90,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
WithWriteCache(enableWriteCache),
WithWriteCacheOptions(o.wcOpts),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index de00eabd1..b1232707f 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -52,10 +52,10 @@ func TestWriteCacheObjectLoss(t *testing.T) {
})
}
require.NoError(t, errG.Wait())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
var getPrm GetPrm
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index 26dc8ec1e..db361a8bd 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeMove(ctx, d, treeID, m)
}
@@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
}
@@ -103,9 +113,46 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
}
+// TreeApplyBatch implements the pilorama.Forest interface.
+func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+ return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m)
+}
+
// TreeGetByPath implements the pilorama.Forest interface.
func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath",
@@ -130,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
}
@@ -155,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
if s.info.Mode.NoMetabase() {
return pilorama.Meta{}, 0, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return pilorama.Meta{}, 0, err
+ }
+ defer release()
return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
}
@@ -180,11 +237,16 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
}
// TreeSortedByFilename implements the pilorama.Forest interface.
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) {
+func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
@@ -204,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID
if s.info.Mode.NoMetabase() {
return nil, last, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, last, err
+ }
+ defer release()
return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
}
@@ -229,6 +296,11 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return pilorama.Move{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return pilorama.Move{}, err
+ }
+ defer release()
return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
}
@@ -253,6 +325,11 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeDrop(ctx, cid, treeID)
}
@@ -276,6 +353,11 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeList(ctx, cid)
}
@@ -299,6 +381,11 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
return s.pilorama.TreeHeight(ctx, cid, treeID)
}
@@ -323,6 +410,11 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b
if s.info.Mode.NoMetabase() {
return false, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer release()
return s.pilorama.TreeExists(ctx, cid, treeID)
}
@@ -351,6 +443,11 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
}
@@ -375,6 +472,11 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st
if s.info.Mode.NoMetabase() {
return 0, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
}
@@ -396,6 +498,11 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeListTrees(ctx, prm)
}
@@ -425,5 +532,10 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
}
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index a6de07f03..9edb89df8 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -67,6 +67,12 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
}
@@ -124,12 +130,19 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
close(started)
defer cleanup()
- s.log.Info(logs.StartedWritecacheSealAsync)
- if err := s.writeCache.Seal(ctx, prm); err != nil {
- s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err))
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
return
}
- s.log.Info(logs.WritecacheSealCompletedAsync)
+ defer release()
+
+ s.log.Info(ctx, logs.StartedWritecacheSealAsync)
+ if err := s.writeCache.Seal(ctx, prm); err != nil {
+ s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
+ return
+ }
+ s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
}()
select {
case <-ctx.Done():
@@ -138,5 +151,11 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
return nil
}
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.writeCache.Seal(ctx, prm)
}
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index 79ab7d9c6..fd85b4501 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -43,12 +43,12 @@ func BenchmarkWriteAfterDelete(b *testing.B) {
b.SetParallelism(parallel)
benchmarkRunPar(b, cache, payloadSize)
})
- require.NoError(b, cache.Close())
+ require.NoError(b, cache.Close(context.Background()))
}
func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close()) }()
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
ctx := context.Background()
objGen := testutil.RandObjGenerator{ObjSize: size}
@@ -71,7 +71,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close()) }()
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
benchmarkRunPar(b, cache, size)
}
@@ -100,7 +100,7 @@ func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
- require.NoError(b, cache.Init(), "initializing")
+ require.NoError(b, cache.Init(context.Background()), "initializing")
}
type testMetabase struct{}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index b97fc5856..ee709ea73 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -6,6 +6,7 @@ import (
"sync"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -55,12 +56,13 @@ func New(opts ...Option) Cache {
counter: fstree.NewSimpleCounter(),
options: options{
- log: &logger.Logger{Logger: zap.NewNop()},
+ log: logger.NewLoggerWrapper(zap.NewNop()),
maxObjectSize: defaultMaxObjectSize,
workersCount: defaultFlushWorkersCount,
maxCacheSize: defaultMaxCacheSize,
metrics: DefaultMetrics(),
flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
+ qosLimiter: qos.NewNoopLimiter(),
},
}
@@ -94,23 +96,24 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error {
if err != nil {
return metaerr.Wrap(err)
}
- return metaerr.Wrap(c.initCounters())
+ c.initCounters()
+ return nil
}
// Init runs necessary services.
-func (c *cache) Init() error {
+func (c *cache) Init(ctx context.Context) error {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
- if err := c.flushAndDropBBoltDB(context.Background()); err != nil {
+ if err := c.flushAndDropBBoltDB(ctx); err != nil {
return fmt.Errorf("flush previous version write-cache database: %w", err)
}
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache
c.cancel.Store(cancel)
c.runFlushLoop(ctx)
return nil
}
// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
-func (c *cache) Close() error {
+func (c *cache) Close(ctx context.Context) error {
if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil {
cancelValue.(context.CancelFunc)()
}
@@ -127,7 +130,7 @@ func (c *cache) Close() error {
var err error
if c.fsTree != nil {
- err = c.fsTree.Close()
+ err = c.fsTree.Close(ctx)
if err != nil {
c.fsTree = nil
}
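Init's context.WithCancel(context.WithoutCancel(ctx)) detaches the flush loop from the caller's cancellation while keeping context values (such as IO tags); the cache then owns the lifetime through the cancel func it stores and invokes in Close. The idiom in isolation:

```go
// context.WithoutCancel (Go 1.21+) yields a context that keeps the
// parent's values but is never cancelled by it; wrapping it in WithCancel
// hands ownership of the lifetime to the component.
func detach(parent context.Context) (context.Context, context.CancelFunc) {
	return context.WithCancel(context.WithoutCancel(parent))
}
```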
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index dda284439..94a0a40db 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -46,7 +46,7 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
storageType = StorageTypeFSTree
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
- storagelog.Write(c.log,
+ storagelog.Write(ctx, c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index bfa6aacb0..893d27ba2 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -6,6 +6,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -14,6 +15,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -35,6 +37,7 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String())
fl := newFlushLimiter(c.flushSizeLimit)
c.wg.Add(1)
go func() {
@@ -64,7 +67,13 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
continue
}
- err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ release, err := c.qosLimiter.ReadRequest(ctx)
+ if err != nil {
+ c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err))
+ c.modeMtx.RUnlock()
+ continue
+ }
+ err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
if err := fl.acquire(oi.DataSize); err != nil {
return err
}
@@ -79,11 +88,15 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
return ctx.Err()
}
})
+ release()
if err != nil {
- c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+ c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
}
c.modeMtx.RUnlock()
+
+ // counter changed by fstree
+ c.estimateCacheSize()
case <-ctx.Done():
return
}
@@ -107,12 +120,18 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
defer fl.release(objInfo.size)
+ release, err := c.qosLimiter.WriteRequest(ctx)
+ if err != nil {
+ c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err))
+ return
+ }
+ defer release()
res, err := c.fsTree.Get(ctx, common.GetPrm{
Address: objInfo.addr,
})
if err != nil {
if !client.IsErrObjectNotFound(err) {
- c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
}
return
}
@@ -126,11 +145,11 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
}
-func (c *cache) reportFlushError(msg string, addr string, err error) {
+func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
if c.reportError != nil {
- c.reportError(msg, err)
+ c.reportError(ctx, msg, err)
} else {
- c.log.Error(msg,
+ c.log.Error(ctx, msg,
zap.String("address", addr),
zap.Error(err))
}
@@ -145,7 +164,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var obj objectSDK.Object
err := obj.Unmarshal(e.ObjectData)
if err != nil {
- c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
+ c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
if ignoreErrors {
return nil
}
@@ -183,7 +202,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
- c.reportFlushError(logs.FSTreeCantFushObjectBlobstor,
+ c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor,
addr.EncodeToString(), err)
}
return err
@@ -195,7 +214,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
_, err = c.metabase.UpdateStorageID(ctx, updPrm)
if err != nil {
- c.reportFlushError(logs.FSTreeCantUpdateID,
+ c.reportFlushError(ctx, logs.FSTreeCantUpdateID,
addr.EncodeToString(), err)
}
return err
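runFlushLoop tags its context with the write-cache IO tag so downstream limiters can classify this traffic as background. A small sketch of the tagging round-trip; IOTagFromContext is assumed to exist alongside ContextWithIOTag in the frostfs-qos tagging package:

```go
// Tag the worker's context, then recover the tag where a limiter decides
// which bucket the request belongs to.
func tagWritecache(ctx context.Context) context.Context {
	ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String())
	if tag, ok := tagging.IOTagFromContext(ctx); ok {
		_ = tag // "writecache"
	}
	return ctx
}
```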
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 59a4e4895..7fc84657c 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -38,9 +38,9 @@ func TestFlush(t *testing.T) {
errCountOpt := func() (Option, *atomic.Uint32) {
cnt := &atomic.Uint32{}
- return WithReportErrorFunc(func(msg string, err error) {
+ return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
cnt.Add(1)
- testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
}), cnt
}
@@ -114,11 +114,11 @@ func runFlushTest[Option any](
) {
t.Run("no errors", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
require.NoError(t, wc.Flush(context.Background(), false, false))
@@ -127,15 +127,15 @@ func runFlushTest[Option any](
t.Run("flush on moving to degraded mode", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
// Blobstor is read-only, so we expect an error from `flush` here.
- require.Error(t, wc.SetMode(mode.Degraded))
+ require.Error(t, wc.SetMode(context.Background(), mode.Degraded))
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
- require.NoError(t, wc.SetMode(mode.Degraded))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, wc.SetMode(context.Background(), mode.Degraded))
check(t, mb, bs, objects)
})
@@ -145,12 +145,12 @@ func runFlushTest[Option any](
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
f.InjectFn(t, wc)
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
require.Equal(t, uint32(0), errCount.Load())
require.Error(t, wc.Flush(context.Background(), false, false))
@@ -173,7 +173,7 @@ func newCache[Option any](
meta.WithPath(filepath.Join(dir, "meta")),
meta.WithEpochState(dummyEpoch{}))
require.NoError(t, mb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.Init())
+ require.NoError(t, mb.Init(context.Background()))
bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -184,15 +184,15 @@ func newCache[Option any](
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init())
+ require.NoError(t, bs.Init(context.Background()))
wc := createCacheFn(t, mb, bs, opts...)
require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
// First set mode for metabase and blobstor to prevent background flushes.
- require.NoError(t, mb.SetMode(mode.ReadOnly))
- require.NoError(t, bs.SetMode(mode.ReadOnly))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly))
return wc, bs, mb
}
diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go
index 9ec039f91..e369fbd50 100644
--- a/pkg/local_object_storage/writecache/iterate.go
+++ b/pkg/local_object_storage/writecache/iterate.go
@@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
return b.ForEach(func(k, _ []byte) error {
err := addr.DecodeString(string(k))
if err != nil {
- return fmt.Errorf("could not parse object address: %w", err)
+ return fmt.Errorf("parse object address: %w", err)
}
return f(addr)
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
index ddc4101be..0e020b36e 100644
--- a/pkg/local_object_storage/writecache/limiter.go
+++ b/pkg/local_object_storage/writecache/limiter.go
@@ -3,6 +3,8 @@ package writecache
import (
"errors"
"sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
var errLimiterClosed = errors.New("acquire failed: limiter closed")
@@ -45,17 +47,11 @@ func (l *flushLimiter) release(size uint64) {
l.cond.L.Lock()
defer l.cond.L.Unlock()
- if l.size >= size {
- l.size -= size
- } else {
- panic("flushLimiter: invalid size")
- }
+ assert.True(l.size >= size, "flushLimiter: invalid size")
+ l.size -= size
- if l.count > 0 {
- l.count--
- } else {
- panic("flushLimiter: invalid count")
- }
+ assert.True(l.count > 0, "flushLimiter: invalid count")
+ l.count--
l.cond.Broadcast()
}
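The explicit panic branches collapse into assert.True. The helper's shape is assumed from its call sites here, a condition plus message that panics on violation; the real one lives in internal/assert:

```go
package assert

import "strings"

// True panics with the joined details when cond is false. This is the
// shape implied by the call sites above, not the verbatim implementation.
func True(cond bool, details ...string) {
	if !cond {
		panic(strings.Join(details, " "))
	}
}
```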
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index d12dd603b..c491be60b 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -23,8 +23,8 @@ type setModePrm struct {
// SetMode sets write-cache mode of operation.
// When shard is put in read-only mode all objects in memory are flushed to disk
// and all background jobs are suspended.
-func (c *cache) SetMode(m mode.Mode) error {
- ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode",
+func (c *cache) SetMode(ctx context.Context, m mode.Mode) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode",
trace.WithAttributes(
attribute.String("mode", m.String()),
))
@@ -60,7 +60,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
// guarantees that there are no in-flight operations.
for len(c.flushCh) != 0 {
- c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
+ c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush)
time.Sleep(time.Second)
}
@@ -82,8 +82,8 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
return nil
}
if !shrink {
- if err := c.fsTree.Close(); err != nil {
- return fmt.Errorf("can't close write-cache storage: %w", err)
+ if err := c.fsTree.Close(ctx); err != nil {
+ return fmt.Errorf("close write-cache storage: %w", err)
}
return nil
}
@@ -98,19 +98,19 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
if errors.Is(err, errIterationCompleted) {
empty = false
} else {
- return fmt.Errorf("failed to check write-cache items: %w", err)
+ return fmt.Errorf("check write-cache items: %w", err)
}
}
- if err := c.fsTree.Close(); err != nil {
- return fmt.Errorf("can't close write-cache storage: %w", err)
+ if err := c.fsTree.Close(ctx); err != nil {
+ return fmt.Errorf("close write-cache storage: %w", err)
}
if empty {
err := os.RemoveAll(c.path)
if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("failed to remove write-cache files: %w", err)
+ return fmt.Errorf("remove write-cache files: %w", err)
}
} else {
- c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
+ c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
}
return nil
}
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
index 70cfe8382..4fbadbc64 100644
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -18,13 +18,13 @@ func TestMode(t *testing.T) {
require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close())
+ require.NoError(t, wc.Close(context.Background()))
require.NoError(t, wc.Open(context.Background(), mode.Degraded))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close())
+ require.NoError(t, wc.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 66ac7805c..a4f98ad06 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,8 +1,10 @@
package writecache
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
// Option represents write-cache configuration option.
@@ -29,19 +31,21 @@ type options struct {
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
- reportError func(string, error)
+ reportError func(context.Context, string, error)
// metrics is metrics implementation
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
disableBackgroundFlush bool
// flushSizeLimit is total size of flushing objects.
flushSizeLimit uint64
+ // qosLimiter is used to limit flush RPS.
+ qosLimiter qos.Limiter
}
// WithLogger sets logger.
func WithLogger(log *logger.Logger) Option {
return func(o *options) {
- o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
+ o.log = log
}
}
@@ -108,7 +112,7 @@ func WithNoSync(noSync bool) Option {
}
// WithReportErrorFunc sets error reporting function.
-func WithReportErrorFunc(f func(string, error)) Option {
+func WithReportErrorFunc(f func(context.Context, string, error)) Option {
return func(o *options) {
o.reportError = f
}
@@ -134,3 +138,9 @@ func WithFlushSizeLimit(v uint64) Option {
o.flushSizeLimit = v
}
}
+
+func WithQoSLimiter(l qos.Limiter) Option {
+ return func(o *options) {
+ o.qosLimiter = l
+ }
+}
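WithQoSLimiter is how the shard injects its opsLimiter into the cache (see the shard.go hunk above); when omitted, the noop limiter set in New applies. A hedged construction example; the flush size value is arbitrary:

```go
// Example wiring; qos.NewNoopLimiter is already the default, so passing
// it here is a no-op shown only for shape.
func newCache() writecache.Cache {
	return writecache.New(
		writecache.WithQoSLimiter(qos.NewNoopLimiter()),
		writecache.WithFlushSizeLimit(64<<20), // hypothetical limit
	)
}
```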
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index c53067bea..2fbf50913 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -2,6 +2,7 @@ package writecache
import (
"context"
+ "fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -59,7 +60,15 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
// putBig writes object to FSTree and pushes it to the flush workers queue.
func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
- if !c.hasEnoughSpaceFS() {
+ if prm.RawData == nil { // fail-safe: RawData should be marshalled by the shard.
+ data, err := prm.Object.Marshal()
+ if err != nil {
+ return fmt.Errorf("cannot marshal object: %w", err)
+ }
+ prm.RawData = data
+ }
+ size := uint64(len(prm.RawData))
+ if !c.hasEnoughSpace(size) {
return ErrOutOfSpace
}
@@ -68,7 +77,7 @@ func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
return err
}
- storagelog.Write(c.log,
+ storagelog.Write(ctx, c.log,
storagelog.AddressField(prm.Address.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree PUT"),
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index 835686fbb..7a52d3672 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -7,10 +7,6 @@ func (c *cache) estimateCacheSize() (uint64, uint64) {
return count, size
}
-func (c *cache) hasEnoughSpaceFS() bool {
- return c.hasEnoughSpace(c.maxObjectSize)
-}
-
func (c *cache) hasEnoughSpace(objectSize uint64) bool {
count, size := c.estimateCacheSize()
if c.maxCacheCount > 0 && count+1 > c.maxCacheCount {
@@ -19,7 +15,6 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
return c.maxCacheSize >= size+objectSize
}
-func (c *cache) initCounters() error {
+func (c *cache) initCounters() {
c.estimateCacheSize()
- return nil
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 2e52e5b20..e88566cdf 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
fstree.WithFileCounter(c.counter),
)
if err := c.fsTree.Open(mod); err != nil {
- return fmt.Errorf("could not open FSTree: %w", err)
+ return fmt.Errorf("open FSTree: %w", err)
}
if err := c.fsTree.Init(); err != nil {
- return fmt.Errorf("could not init FSTree: %w", err)
+ return fmt.Errorf("init FSTree: %w", err)
}
return nil
@@ -43,9 +43,9 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
if err != nil && !client.IsErrObjectNotFound(err) {
- c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
+ c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
} else if err == nil {
- storagelog.Write(c.log,
+ storagelog.Write(ctx, c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
index 3a100f1a3..5eb341ba4 100644
--- a/pkg/local_object_storage/writecache/upgrade.go
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
return nil
}
if err != nil {
- return fmt.Errorf("could not check write-cache database existence: %w", err)
+ return fmt.Errorf("check write-cache database existence: %w", err)
}
db, err := OpenDB(c.path, true, os.OpenFile)
if err != nil {
- return fmt.Errorf("could not open write-cache database: %w", err)
+ return fmt.Errorf("open write-cache database: %w", err)
}
defer func() {
_ = db.Close()
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index a973df604..7ed511318 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -38,21 +38,21 @@ type Cache interface {
// Returns ErrReadOnly if the Cache is currently in the read-only mode.
Delete(context.Context, oid.Address) error
Put(context.Context, common.PutPrm) (common.PutRes, error)
- SetMode(mode.Mode) error
+ SetMode(context.Context, mode.Mode) error
SetLogger(*logger.Logger)
DumpInfo() Info
Flush(context.Context, bool, bool) error
Seal(context.Context, SealPrm) error
- Init() error
+ Init(context.Context) error
Open(ctx context.Context, mode mode.Mode) error
- Close() error
+ Close(context.Context) error
GetMetrics() Metrics
}
// MainStorage is the interface of the underlying storage of Cache implementations.
type MainStorage interface {
- Compressor() *compression.Config
+ Compressor() *compression.Compressor
Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error)
Put(context.Context, common.PutPrm) (common.PutRes, error)
}
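With SetMode, Init, and Close now taking a context, a writecache consumer threads one context through the whole lifecycle. A hedged usage sketch, assuming the import paths shown in this diff and mode.ReadWrite from the shard mode package; error handling is compressed for brevity.

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
)

// runCache drives the context-aware lifecycle: every stage can now be
// cancelled or traced through the same ctx.
func runCache(ctx context.Context, wc writecache.Cache) error {
	if err := wc.Open(ctx, mode.ReadWrite); err != nil {
		return err
	}
	if err := wc.Init(ctx); err != nil { // Init now takes a context
		_ = wc.Close(ctx)
		return err
	}
	defer func() { _ = wc.Close(ctx) }() // so does Close
	return wc.Flush(ctx, false, false)
}
```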
diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go
index aae245acd..4462daab4 100644
--- a/pkg/morph/client/balance/balanceOf.go
+++ b/pkg/morph/client/balance/balanceOf.go
@@ -1,36 +1,33 @@
package balance
import (
+ "context"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// BalanceOf receives the amount of funds in the client's account
// through the Balance contract call, and returns it.
-func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {
- h, err := address.StringToUint160(id.EncodeToString())
- if err != nil {
- return nil, err
- }
+func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) {
+ h := id.ScriptHash()
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(balanceOfMethod)
invokePrm.SetArgs(h)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err)
} else if ln := len(prms); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln)
}
amount, err := client.BigIntFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
}
return amount, nil
}
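The old code converted user.ID to its base58 address string and parsed it back into a util.Uint160; since user.ID already wraps the script hash, ScriptHash() (used above) is a lossless shortcut with no error path. A small sketch of the equivalence, under the assumption that id holds a properly constructed identifier:

```go
package example

import (
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
)

// sameHash shows the two paths agreeing for a well-formed id.
func sameHash(id user.ID) (bool, error) {
	direct := id.ScriptHash() // new path: no error to handle
	parsed, err := address.StringToUint160(id.EncodeToString())
	if err != nil {
		return false, err // old path could only fail on malformed ids
	}
	return direct.Equals(parsed), nil
}
```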
diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go
index 4befbef45..f4685b0ab 100644
--- a/pkg/morph/client/balance/burn.go
+++ b/pkg/morph/client/balance/burn.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,12 +32,12 @@ func (b *BurnPrm) SetID(id []byte) {
}
// Burn destroys funds from the account.
-func (c *Client) Burn(p BurnPrm) error {
+func (c *Client) Burn(ctx context.Context, p BurnPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(burnMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go
index b05c526dc..1dacb9574 100644
--- a/pkg/morph/client/balance/client.go
+++ b/pkg/morph/client/balance/client.go
@@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("could not create static client of Balance contract: %w", err)
+ return nil, fmt.Errorf("create 'balance' contract client: %w", err)
}
return &Client{
@@ -54,15 +54,7 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
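The removed TryNotary() option is not lost: defaultOpts() now seeds the option slice with client.TryNotary(), so notary tries are always on for this contract client and AsAlphabet() remains the only caller-facing knob. Caller-side, the change reads as this sketch:

```go
// before this PR (notary tries were an explicit opt-in):
//	c, err := balance.NewFromMorph(cli, contract, fee, balance.TryNotary())
//
// after (notary tries are the default; the option is gone):
//	c, err := balance.NewFromMorph(cli, contract, fee)
```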
diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go
index 39e4b28e5..57e61d62b 100644
--- a/pkg/morph/client/balance/decimals.go
+++ b/pkg/morph/client/balance/decimals.go
@@ -1,6 +1,7 @@
package balance
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,20 +9,20 @@ import (
// Decimals returns the decimal precision of currency transactions
// obtained through the Balance contract call.
-func (c *Client) Decimals() (uint32, error) {
+func (c *Client) Decimals(ctx context.Context) (uint32, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(decimalsMethod)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err)
} else if ln := len(prms); ln != 1 {
return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln)
}
decimals, err := client.IntFromStackItem(prms[0])
if err != nil {
- return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
}
return uint32(decimals), nil
}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
index a5b206799..83e8b0586 100644
--- a/pkg/morph/client/balance/lock.go
+++ b/pkg/morph/client/balance/lock.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -42,12 +44,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
}
// Lock locks fund on the user account.
-func (c *Client) Lock(p LockPrm) error {
+func (c *Client) Lock(ctx context.Context, p LockPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(lockMethod)
prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go
index 73448da31..082ade85e 100644
--- a/pkg/morph/client/balance/mint.go
+++ b/pkg/morph/client/balance/mint.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,12 +32,12 @@ func (m *MintPrm) SetID(id []byte) {
}
// Mint sends funds to the account.
-func (c *Client) Mint(p MintPrm) error {
+func (c *Client) Mint(ctx context.Context, p MintPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(mintMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go
index 08fb05289..870bed166 100644
--- a/pkg/morph/client/balance/transfer.go
+++ b/pkg/morph/client/balance/transfer.go
@@ -1,11 +1,11 @@
package balance
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// TransferPrm groups parameters of TransferX method.
@@ -21,27 +21,18 @@ type TransferPrm struct {
// TransferX transfers p.Amount of GASe-12 from p.From to p.To
// with details p.Details through direct smart contract call.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) TransferX(p TransferPrm) error {
- from, err := address.StringToUint160(p.From.EncodeToString())
- if err != nil {
- return err
- }
-
- to, err := address.StringToUint160(p.To.EncodeToString())
- if err != nil {
- return err
- }
+func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
+ from := p.From.ScriptHash()
+ to := p.To.ScriptHash()
prm := client.InvokePrm{}
prm.SetMethod(transferXMethod)
prm.SetArgs(from, to, p.Amount, p.Details)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err = c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
+ return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
}
return nil
}
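A hedged usage sketch of the migrated TransferX; the field names come from the TransferPrm hunk above, while the amount and details types and values are assumptions for illustration:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// settle moves funds with an attached detail record; the context now flows
// into the underlying Invoke call.
func settle(ctx context.Context, c *balance.Client, from, to user.ID) error {
	return c.TransferX(ctx, balance.TransferPrm{
		From:    from,
		To:      to,
		Amount:  1_000_000,        // GASe-12 units, illustrative
		Details: []byte("settle"), // free-form details, illustrative
	})
}
```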
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index 933f1039f..aab058d27 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -9,6 +9,7 @@ import (
"sync/atomic"
"time"
+ nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
@@ -60,6 +61,9 @@ type Client struct {
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
+ nnsHash util.Uint160 // NNS contract hash
+
+ nnsReader *nnsClient.ContractReader // NNS contract wrapper
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
@@ -94,27 +98,12 @@ type Client struct {
type cache struct {
m sync.RWMutex
- nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
metrics metrics.MorphCacheMetrics
}
-func (c *cache) nns() *util.Uint160 {
- c.m.RLock()
- defer c.m.RUnlock()
-
- return c.nnsHash
-}
-
-func (c *cache) setNNSHash(nnsHash util.Uint160) {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.nnsHash = &nnsHash
-}
-
func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
defer c.m.RUnlock()
@@ -133,7 +122,6 @@ func (c *cache) invalidate() {
c.m.Lock()
defer c.m.Unlock()
- c.nnsHash = nil
c.gKey = nil
c.txHeights.Purge()
}
@@ -163,24 +151,10 @@ func (e *notHaltStateError) Error() string {
)
}
-// implementation of error interface for FrostFS-specific errors.
-type frostfsError struct {
- err error
-}
-
-func (e frostfsError) Error() string {
- return fmt.Sprintf("frostfs error: %v", e.err)
-}
-
-// wraps FrostFS-specific error into frostfsError. Arg must not be nil.
-func wrapFrostFSError(err error) error {
- return frostfsError{err}
-}
-
// Invoke invokes contract method by sending transaction into blockchain.
// Returns valid until block value.
// Supported args types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
+func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) {
start := time.Now()
success := false
defer func() {
@@ -191,29 +165,29 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
defer c.switchLock.RUnlock()
if c.inactive {
- return 0, ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
if err != nil {
- return 0, fmt.Errorf("could not invoke %s: %w", method, err)
+ return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
}
- c.logger.Debug(logs.ClientNeoClientInvoke,
+ c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
success = true
- return vub, nil
+ return InvokeRes{Hash: txHash, VUB: vub}, nil
}
// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
// If cb returns an error, the session is closed and this error is returned as-is.
-// If the remove neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
+// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
// The default batchSize is 100, the default limit from neo-go.
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error {
+func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
start := time.Now()
success := false
defer func() {
@@ -240,7 +214,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
if err != nil {
return err
} else if val.State != HaltState {
- return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
+ return ¬HaltStateError{state: val.State, exception: val.FaultException}
}
arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
@@ -262,10 +236,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
}()
// Batch size for TraverseIterator() can be restricted on the server side.
- traverseBatchSize := batchSize
- if invoker.DefaultIteratorResultItems < traverseBatchSize {
- traverseBatchSize = invoker.DefaultIteratorResultItems
- }
+ traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
for {
items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
if err != nil {
@@ -307,7 +278,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (
}
if val.State != HaltState {
- return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
+ return nil, ¬HaltStateError{state: val.State, exception: val.FaultException}
}
success = true
@@ -328,7 +299,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
- c.logger.Debug(logs.ClientNativeGasTransferInvoke,
+ c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -362,7 +333,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
- c.logger.Debug(logs.ClientBatchGasTransferInvoke,
+ c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -389,8 +360,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(logs.ClientCantGetBlockchainHeight,
- zap.String("error", err.Error()))
+ c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
+ zap.Error(err))
return nil
}
@@ -403,8 +374,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(logs.ClientCantGetBlockchainHeight243,
- zap.String("error", err.Error()))
+ c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
+ zap.Error(err))
return nil
}
@@ -499,7 +470,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
// NeoFSAlphabetList returns keys that are stored in the NeoFS Alphabet role. The main
// chain stores alphabet node keys of the inner ring there, while the sidechain stores
// both alphabet and non-alphabet node keys of the inner ring.
-func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
+func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -509,7 +480,7 @@ func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
list, err := c.roleList(noderoles.NeoFSAlphabet)
if err != nil {
- return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
+ return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
}
return list, nil
@@ -523,7 +494,7 @@ func (c *Client) GetDesignateHash() util.Uint160 {
func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
height, err := c.rpcActor.GetBlockCount()
if err != nil {
- return nil, fmt.Errorf("can't get chain height: %w", err)
+ return nil, fmt.Errorf("get chain height: %w", err)
}
return c.rolemgmt.GetDesignatedByRole(r, height)
@@ -594,6 +565,7 @@ func (c *Client) setActor(act *actor.Actor) {
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
+ c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
}
func (c *Client) GetActor() *actor.Actor {
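Two things change in client.go: the manual batch-size clamp becomes the built-in min (Go 1.21+), and Invoke now returns an InvokeRes carrying both the transaction hash and the valid-until-block value instead of a bare uint32. A consumer sketch, with names grounded in the return statement and the Wait signature shown above:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// invokeAndWait waits on the VUB from the new result type; res.Hash is also
// available for correlating logs with the sent transaction.
func invokeAndWait(ctx context.Context, c *client.Client, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error {
	res, err := c.Invoke(ctx, contract, fee, method, args...)
	if err != nil {
		return err
	}
	return c.Wait(ctx, res.VUB)
}
```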
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index 08d16deb4..e4dcd0db7 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -61,7 +61,7 @@ var ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
func defaultConfig() *cfg {
return &cfg{
dialTimeout: defaultDialTimeout,
- logger: &logger.Logger{Logger: zap.L()},
+ logger: logger.NewLoggerWrapper(zap.L()),
metrics: morphmetrics.NoopRegister{},
waitInterval: defaultWaitInterval,
signer: &transaction.Signer{
@@ -130,10 +130,10 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
for cli.endpoints.curr, endpoint = range cli.endpoints.list {
cli.client, act, err = cli.newCli(ctx, endpoint)
if err != nil {
- cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
+ cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
zap.Error(err), zap.String("endpoint", endpoint.Address))
} else {
- cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint,
+ cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint,
zap.String("endpoint", endpoint.Address))
if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
cli.switchIsActive.Store(true)
@@ -145,6 +145,11 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
if cli.client == nil {
return nil, ErrNoHealthyEndpoint
}
+ cs, err := cli.client.GetContractStateByID(nnsContractID)
+ if err != nil {
+ return nil, fmt.Errorf("resolve nns hash: %w", err)
+ }
+ cli.nnsHash = cs.Hash
cli.setActor(act)
go cli.closeWaiter(ctx)
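The NNS hash is now resolved once at construction instead of being lazily cached, which is what lets the nns()/setNNSHash cache in client.go above be deleted. GetContractStateByID asks the native ContractManagement for the contract with the given ID; on FrostFS sidechains the NNS contract conventionally holds ID 1, but the value of nnsContractID is not shown in this hunk, so treat it as an assumption in this sketch:

```go
package example

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// resolveNNS is a hedged sketch of the one-shot resolution done in New().
func resolveNNS(cli *rpcclient.Client) (util.Uint160, error) {
	cs, err := cli.GetContractStateByID(1) // assumed nnsContractID == 1
	if err != nil {
		return util.Uint160{}, fmt.Errorf("resolve nns hash: %w", err)
	}
	return cs.Hash, nil // stable for the chain's lifetime, stored once
}
```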
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index b512a6594..be684619b 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -27,7 +27,6 @@ const (
getMethod = "get"
listMethod = "list"
containersOfMethod = "containersOf"
- eaclMethod = "eACL"
deletionInfoMethod = "deletionInfo"
// putNamedMethod is method name for container put with an alias. It is exported to provide custom fee.
@@ -47,9 +46,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
opts[i](o)
}
- sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...)
+ sc, err := client.NewStatic(cli, contract, fee, *o...)
if err != nil {
- return nil, fmt.Errorf("can't create container static client: %w", err)
+ return nil, fmt.Errorf("create 'container' contract client: %w", err)
}
return &Client{client: sc}, nil
@@ -69,20 +68,10 @@ func (c Client) ContractAddress() util.Uint160 {
// parameter of Wrapper.
type Option func(*opts)
-type opts struct {
- staticOpts []client.StaticClientOption
-}
+type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- o.staticOpts = append(o.staticOpts, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
@@ -92,6 +81,6 @@ func TryNotary() Option {
// Considered to be used by IR nodes only.
func AsAlphabet() Option {
return func(o *opts) {
- o.staticOpts = append(o.staticOpts, client.AsAlphabet())
+ *o = append(*o, client.AsAlphabet())
}
}
diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go
index c4db0fe6e..60fb8ad7c 100644
--- a/pkg/morph/client/container/containers_of.go
+++ b/pkg/morph/client/container/containers_of.go
@@ -1,10 +1,9 @@
package container
import (
+ "context"
"errors"
- "fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -15,28 +14,37 @@ import (
// to the specified user of FrostFS system. If idUser is nil, returns the list of all containers.
//
// If remote RPC does not support neo-go session API, fallback to List() method.
-func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
- var rawID []byte
+func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) {
+ var cidList []cid.ID
+ var err error
+ cb := func(id cid.ID) error {
+ cidList = append(cidList, id)
+ return nil
+ }
+ if err = c.IterateContainersOf(ctx, idUser, cb); err != nil {
+ return nil, err
+ }
+ return cidList, nil
+}
+
+// IterateContainersOf iterates over the list of container identifiers
+// belonging to the specified user of the FrostFS system and executes
+// `cb` on each element. If idUser is nil, it iterates over all containers.
+func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error {
+ var rawID []byte
if idUser != nil {
rawID = idUser.WalletBytes()
}
- var cidList []cid.ID
- cb := func(item stackitem.Item) error {
- rawID, err := client.BytesFromStackItem(item)
+ itemCb := func(item stackitem.Item) error {
+ id, err := getCIDfromStackItem(item)
if err != nil {
- return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err)
+ return err
}
-
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return fmt.Errorf("decode container ID: %w", err)
+ if err = cb(id); err != nil {
+ return err
}
-
- cidList = append(cidList, id)
return nil
}
@@ -50,13 +58,10 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
const batchSize = 512
cnrHash := c.client.ContractAddress()
- err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID)
- if err != nil {
- if errors.Is(err, unwrap.ErrNoSessionID) {
- return c.list(idUser)
- }
- return nil, err
+ err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
+ if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
+ return c.iterate(ctx, idUser, cb)
}
- return cidList, nil
+ return err
}
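ContainersOf is now a thin accumulator over IterateContainersOf. The callback form also composes with early termination, which the slice-returning API could not express; a hedged sketch (errStop and firstN are illustrative, not part of the package):

```go
package example

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

var errStop = errors.New("stop iteration")

// firstN collects at most n container IDs; the callback's error is
// propagated as-is, so a sentinel error works as a break.
func firstN(ctx context.Context, c *container.Client, owner *user.ID, n int) ([]cid.ID, error) {
	out := make([]cid.ID, 0, n)
	err := c.IterateContainersOf(ctx, owner, func(id cid.ID) error {
		out = append(out, id)
		if len(out) == n {
			return errStop
		}
		return nil
	})
	if err != nil && !errors.Is(err, errStop) {
		return nil, err
	}
	return out, nil
}
```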
diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go
index 20351b570..09912efa5 100644
--- a/pkg/morph/client/container/delete.go
+++ b/pkg/morph/client/container/delete.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
@@ -12,7 +13,7 @@ import (
// along with signature and session token.
//
// Returns error if container ID is nil.
-func Delete(c *Client, witness core.RemovalWitness) error {
+func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
binCnr := make([]byte, sha256.Size)
witness.ContainerID.Encode(binCnr)
@@ -26,7 +27,7 @@ func Delete(c *Client, witness core.RemovalWitness) error {
prm.SetToken(tok.Marshal())
}
- _, err := c.Delete(prm)
+ _, err := c.Delete(ctx, prm)
return err
}
@@ -65,9 +66,7 @@ func (d *DeletePrm) SetKey(key []byte) {
//
// Returns valid until block and any error encountered that caused
// the removal to interrupt.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Delete(p DeletePrm) (uint32, error) {
+func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
if len(p.signature) == 0 && !p.IsControl() {
return 0, errNilArgument
}
@@ -77,9 +76,9 @@ func (c *Client) Delete(p DeletePrm) (uint32, error) {
prm.SetArgs(p.cnr, p.signature, p.key, p.token)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
+ return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
}
return res.VUB, nil
}
diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go
index dda6bf98c..90bcdd7d5 100644
--- a/pkg/morph/client/container/deletion_info.go
+++ b/pkg/morph/client/container/deletion_info.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
"strings"
@@ -14,39 +15,39 @@ import (
"github.com/mr-tron/base58"
)
-func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) {
- return DeletionInfo((*Client)(x), cnr)
+func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) {
+ return DeletionInfo(ctx, (*Client)(x), cnr)
}
type deletionInfo interface {
- DeletionInfo(cid []byte) (*containercore.DelInfo, error)
+ DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error)
}
-func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
+func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.DeletionInfo(binCnr)
+ return c.DeletionInfo(ctx, binCnr)
}
-func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
+func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(deletionInfoMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err)
}
if len(arr) != 2 {
@@ -55,17 +56,17 @@ func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
rawOwner, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err)
}
var owner user.ID
if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil {
- return nil, fmt.Errorf("could not decode container owner id (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err)
}
epoch, err := client.BigIntFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err)
}
return &containercore.DelInfo{
diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go
deleted file mode 100644
index 9e604e091..000000000
--- a/pkg/morph/client/container/eacl.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package container
-
-import (
- "crypto/sha256"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-)
-
-// GetEACL reads the extended ACL table from FrostFS system
-// through Container contract call.
-//
-// Returns apistatus.EACLNotFound if eACL table is missing in the contract.
-func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) {
- binCnr := make([]byte, sha256.Size)
- cnr.Encode(binCnr)
-
- prm := client.TestInvokePrm{}
- prm.SetMethod(eaclMethod)
- prm.SetArgs(binCnr)
-
- prms, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln)
- }
-
- arr, err := client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err)
- }
-
- if len(arr) != 4 {
- return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr))
- }
-
- rawEACL, err := client.BytesFromStackItem(arr[0])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err)
- }
-
- sig, err := client.BytesFromStackItem(arr[1])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err)
- }
-
- // Client may not return errors if the table is missing, so check this case additionally.
- // The absence of a signature in the response can be taken as an eACL absence criterion,
- // since unsigned table cannot be approved in the storage by design.
- if len(sig) == 0 {
- return nil, new(apistatus.EACLNotFound)
- }
-
- pub, err := client.BytesFromStackItem(arr[2])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err)
- }
-
- binToken, err := client.BytesFromStackItem(arr[3])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err)
- }
-
- var res container.EACL
-
- res.Value = eacl.NewTable()
- if err = res.Value.Unmarshal(rawEACL); err != nil {
- return nil, err
- }
-
- if len(binToken) > 0 {
- res.Session = new(session.Container)
-
- err = res.Session.Unmarshal(binToken)
- if err != nil {
- return nil, fmt.Errorf("could not unmarshal session token: %w", err)
- }
- }
-
- // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
- var sigV2 refs.Signature
- sigV2.SetKey(pub)
- sigV2.SetSign(sig)
- sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256)
-
- err = res.Signature.ReadFromV2(sigV2)
- return &res, err
-}
diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go
index ea57a3a95..8622d2cdd 100644
--- a/pkg/morph/client/container/get.go
+++ b/pkg/morph/client/container/get.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
"strings"
@@ -16,8 +17,8 @@ import (
type containerSource Client
-func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) {
- return Get((*Client)(x), cnr)
+func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) {
+ return Get(ctx, (*Client)(x), cnr)
}
// AsContainerSource provides container Source interface
@@ -27,15 +28,15 @@ func AsContainerSource(w *Client) containercore.Source {
}
type getContainer interface {
- Get(cid []byte) (*containercore.Container, error)
+ Get(ctx context.Context, cid []byte) (*containercore.Container, error)
}
// Get marshals container ID, and passes it to Wrapper's Get method.
-func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) {
+func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.Get(binCnr)
+ return c.Get(ctx, binCnr)
}
// Get reads the container from FrostFS system by binary identifier
@@ -43,24 +44,24 @@ func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) {
//
// If an empty slice is returned for the requested identifier,
// storage.ErrNotFound error is returned.
-func (c *Client) Get(cid []byte) (*containercore.Container, error) {
+func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(getMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err)
}
if len(arr) != 4 {
@@ -69,29 +70,29 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) {
cnrBytes, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err)
}
sigBytes, err := client.BytesFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err)
}
pub, err := client.BytesFromStackItem(arr[2])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err)
}
tokBytes, err := client.BytesFromStackItem(arr[3])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err)
}
var cnr containercore.Container
if err := cnr.Value.Unmarshal(cnrBytes); err != nil {
// use other major version if there any
- return nil, fmt.Errorf("can't unmarshal container: %w", err)
+ return nil, fmt.Errorf("unmarshal container: %w", err)
}
if len(tokBytes) > 0 {
@@ -99,7 +100,7 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) {
err = cnr.Session.Unmarshal(tokBytes)
if err != nil {
- return nil, fmt.Errorf("could not unmarshal session token: %w", err)
+ return nil, fmt.Errorf("unmarshal session token: %w", err)
}
}
diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go
index 6fed46c1a..fc63d1beb 100644
--- a/pkg/morph/client/container/list.go
+++ b/pkg/morph/client/container/list.go
@@ -1,20 +1,22 @@
package container
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
-// list returns a list of container identifiers belonging
+// iterate iterates through a list of container identifiers belonging
// to the specified user of FrostFS system. The list is composed
// through Container contract call.
//
-// Returns the identifiers of all FrostFS containers if pointer
+// Iterates through the identifiers of all FrostFS containers if pointer
// to user identifier is nil.
-func (c *Client) list(idUser *user.ID) ([]cid.ID, error) {
+func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error {
var rawID []byte
if idUser != nil {
@@ -25,34 +27,43 @@ func (c *Client) list(idUser *user.ID) ([]cid.ID, error) {
prm.SetMethod(listMethod)
prm.SetArgs(rawID)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err)
+ return fmt.Errorf("test invoke (%s): %w", listMethod, err)
} else if ln := len(res); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
+ return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
}
res, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err)
+ return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err)
}
- cidList := make([]cid.ID, 0, len(res))
for i := range res {
- rawID, err := client.BytesFromStackItem(res[i])
+ id, err := getCIDfromStackItem(res[i])
if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err)
+ return err
}
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
+ if err = cb(id); err != nil {
+ return err
}
-
- cidList = append(cidList, id)
}
- return cidList, nil
+ return nil
+}
+
+func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) {
+ rawID, err := client.BytesFromStackItem(item)
+ if err != nil {
+ return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err)
+ }
+
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
+ }
+ return id, nil
}
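getCIDfromStackItem is the decode path both iterators now share: a container ID arrives as a raw byte array on the VM stack and must be exactly sha256.Size (32) bytes for cid.ID.Decode to accept it. A tiny sketch of that contract:

```go
package example

import (
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// decodeCID mirrors the helper above: Decode validates the length, so a
// stack item of the wrong size fails here rather than later.
func decodeCID(raw []byte) (cid.ID, error) {
	var id cid.ID
	if err := id.Decode(raw); err != nil {
		return cid.ID{}, err
	}
	return id, nil
}
```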
diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go
index 777ae2d4e..3bb84eb87 100644
--- a/pkg/morph/client/container/put.go
+++ b/pkg/morph/client/container/put.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"fmt"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
@@ -14,7 +15,7 @@ import (
// along with sig.Key() and sig.Sign().
//
// Returns error if container is nil.
-func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
+func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) {
data := cnr.Value.Marshal()
d := container.ReadDomain(cnr.Value)
@@ -35,7 +36,7 @@ func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
prm.SetKey(sigV2.GetKey())
prm.SetSignature(sigV2.GetSign())
- err := c.Put(prm)
+ err := c.Put(ctx, prm)
if err != nil {
return nil, err
}
@@ -93,9 +94,7 @@ func (p *PutPrm) SetZone(zone string) {
//
// Returns calculated container identifier and any error
// encountered that caused the saving to interrupt.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Put(p PutPrm) error {
+func (c *Client) Put(ctx context.Context, p PutPrm) error {
if len(p.sig) == 0 || len(p.key) == 0 {
return errNilArgument
}
@@ -116,9 +115,9 @@ func (c *Client) Put(p PutPrm) error {
prm.SetMethod(method)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", method, err)
+ return fmt.Errorf("invoke method (%s): %w", method, err)
}
return nil
}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
index 016b56f8f..d3eba7639 100644
--- a/pkg/morph/client/frostfs/cheque.go
+++ b/pkg/morph/client/frostfs/cheque.go
@@ -1,6 +1,8 @@
package frostfscontract
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -37,13 +39,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) {
}
// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(p ChequePrm) error {
+func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(chequeMethod)
prm.SetArgs(p.id, p.user, p.amount, p.lock)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
return err
}
@@ -66,12 +68,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
}
// AlphabetUpdate update list of alphabet nodes.
-func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error {
+func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(alphabetUpdateMethod)
prm.SetArgs(p.id, p.pubs)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go
index 571915c27..cd6a9849e 100644
--- a/pkg/morph/client/frostfs/client.go
+++ b/pkg/morph/client/frostfs/client.go
@@ -35,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err)
+ return nil, fmt.Errorf("create 'frostfs' contract client: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go
index 4c31f42de..61eb03f09 100644
--- a/pkg/morph/client/frostfsid/client.go
+++ b/pkg/morph/client/frostfsid/client.go
@@ -27,7 +27,7 @@ var _ frostfsidcore.SubjectProvider = (*Client)(nil)
func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) {
sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet())
if err != nil {
- return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err)
+ return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go
index 0852f536c..3a789672a 100644
--- a/pkg/morph/client/frostfsid/subject.go
+++ b/pkg/morph/client/frostfsid/subject.go
@@ -1,6 +1,7 @@
package frostfsid
import (
+ "context"
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
@@ -14,14 +15,14 @@ const (
methodGetSubjectExtended = "getSubjectExtended"
)
-func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
+func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubject)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err)
}
structArr, err := checkStackItem(res)
@@ -31,20 +32,20 @@ func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error)
subj, err := frostfsidclient.ParseSubject(structArr)
if err != nil {
- return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
}
-func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
+func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubjectExtended)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubjectExtended, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err)
}
structArr, err := checkStackItem(res)
@@ -54,7 +55,7 @@ func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.Subject
subj, err := frostfsidclient.ParseSubjectExtended(structArr)
if err != nil {
- return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
@@ -67,7 +68,7 @@ func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error
structArr, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get item array of container (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err)
}
return
}
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index 10ed21582..b9e39c25e 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -2,6 +2,7 @@ package client
import (
"context"
+ "slices"
"sort"
"time"
@@ -42,7 +43,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
newEndpoint := c.endpoints.list[c.endpoints.curr]
cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
- c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
+ c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
zap.String("endpoint", newEndpoint.Address),
zap.Error(err),
)
@@ -52,7 +53,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
c.cache.invalidate()
- c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
+ c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
zap.String("endpoint", newEndpoint.Address))
c.client = cli
@@ -99,8 +100,7 @@ mainLoop:
case <-t.C:
c.switchLock.RLock()
- endpointsCopy := make([]Endpoint, len(c.endpoints.list))
- copy(endpointsCopy, c.endpoints.list)
+ endpointsCopy := slices.Clone(c.endpoints.list)
currPriority := c.endpoints.list[c.endpoints.curr].Priority
highestPriority := c.endpoints.list[0].Priority
@@ -122,7 +122,7 @@ mainLoop:
cli, act, err := c.newCli(ctx, e)
if err != nil {
- c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
+ c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
@@ -147,7 +147,7 @@ mainLoop:
c.switchLock.Unlock()
- c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC,
+ c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC,
zap.String("endpoint", tryE))
return
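slices.Clone (standard library since Go 1.21) replaces the make-plus-copy idiom while the switchLock is held; it is semantically identical and one line shorter:

```go
endpointsCopy := slices.Clone(c.endpoints.list) // Go 1.21+ standard library

// equivalent to the removed form:
// endpointsCopy := make([]Endpoint, len(c.endpoints.list))
// copy(endpointsCopy, c.endpoints.list)
```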
diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go
index eafa097e9..de8afbfb5 100644
--- a/pkg/morph/client/netmap/client.go
+++ b/pkg/morph/client/netmap/client.go
@@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("can't create netmap static client: %w", err)
+ return nil, fmt.Errorf("create 'netmap' contract client: %w", err)
}
return &Client{client: sc}, nil
@@ -65,15 +65,7 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 2d19a8193..3f6aed506 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -1,7 +1,7 @@
package netmap
import (
- "errors"
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -24,75 +24,45 @@ const (
// MaxObjectSize receives max object size configuration
// value through the Netmap contract call.
-func (c *Client) MaxObjectSize() (uint64, error) {
- objectSize, err := c.readUInt64Config(MaxObjectSizeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err)
- }
-
- return objectSize, nil
+func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, MaxObjectSizeConfig)
}
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
-func (c *Client) EpochDuration() (uint64, error) {
- epochDuration, err := c.readUInt64Config(EpochDurationConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err)
- }
-
- return epochDuration, nil
+func (c *Client) EpochDuration(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, EpochDurationConfig)
}
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
-func (c *Client) ContainerFee() (uint64, error) {
- fee, err := c.readUInt64Config(ContainerFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) ContainerFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, ContainerFeeConfig)
}
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
-func (c *Client) ContainerAliasFee() (uint64, error) {
- fee, err := c.readUInt64Config(ContainerAliasFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, ContainerAliasFeeConfig)
}
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
// settings.
//
// Returns (false, nil) if config key is not found in the contract.
-func (c *Client) HomomorphicHashDisabled() (bool, error) {
- return c.readBoolConfig(HomomorphicHashingDisabledKey)
+func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) {
+ return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey)
}
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
-func (c *Client) InnerRingCandidateFee() (uint64, error) {
- fee, err := c.readUInt64Config(IrCandidateFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, IrCandidateFeeConfig)
}
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
-func (c *Client) WithdrawFee() (uint64, error) {
- fee, err := c.readUInt64Config(WithdrawFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, WithdrawFeeConfig)
}
// MaintenanceModeAllowed reads admission of "maintenance" state from the
@@ -100,34 +70,32 @@ func (c *Client) WithdrawFee() (uint64, error) {
// that storage nodes are allowed to switch their state to "maintenance".
//
// By default, maintenance state is disallowed.
-func (c *Client) MaintenanceModeAllowed() (bool, error) {
- return c.readBoolConfig(MaintenanceModeAllowedConfig)
+func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) {
+ return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig)
}
-func (c *Client) readUInt64Config(key string) (uint64, error) {
- v, err := c.config([]byte(key), IntegerAssert)
+func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) {
+ v, err := c.config(ctx, []byte(key))
+ if err != nil {
+ return 0, fmt.Errorf("read netconfig value '%s': %w", key, err)
+ }
+
+ bi, err := v.TryInteger()
if err != nil {
return 0, err
}
-
- // IntegerAssert is guaranteed to return int64 if the error is nil.
- return uint64(v.(int64)), nil
+ return bi.Uint64(), nil
}
// reads boolean value by the given key from the FrostFS network configuration
// stored in the Sidechain. Returns false if the key is not present.
-func (c *Client) readBoolConfig(key string) (bool, error) {
- v, err := c.config([]byte(key), BoolAssert)
+func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) {
+ v, err := c.config(ctx, []byte(key))
if err != nil {
- if errors.Is(err, ErrConfigNotFound) {
- return false, nil
- }
-
- return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err)
+ return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
}
- // BoolAssert is guaranteed to return bool if the error is nil.
- return v.(bool), nil
+ return v.TryBool()
}
// SetConfigPrm groups parameters of SetConfig operation.
@@ -155,13 +123,13 @@ func (s *SetConfigPrm) SetValue(value any) {
}
// SetConfig sets config field.
-func (c *Client) SetConfig(p SetConfigPrm) error {
+func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(setConfigMethod)
prm.SetArgs(p.id, p.key, p.value)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
@@ -198,14 +166,14 @@ type NetworkConfiguration struct {
}
// ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain.
-func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
+func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) {
var res NetworkConfiguration
prm := client.TestInvokePrm{}
prm.SetMethod(configListMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return res, fmt.Errorf("could not perform test invocation (%s): %w",
+ return res, fmt.Errorf("test invoke (%s): %w",
configListMethod, err)
}
@@ -276,22 +244,18 @@ func bytesToBool(val []byte) bool {
return false
}
-// ErrConfigNotFound is returned when the requested key was not found
-// in the network config (returned value is `Null`).
-var ErrConfigNotFound = errors.New("config value not found")
-
// config performs the test invoke of get config value
// method of FrostFS Netmap contract.
//
-// Returns ErrConfigNotFound if config key is not found in the contract.
+// Returns the raw stack item with the value; a missing key yields stackitem.Null.
-func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) {
+func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(configMethod)
prm.SetArgs(key)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
configMethod, err)
}
@@ -300,26 +264,7 @@ func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (a
configMethod, ln)
}
- if _, ok := items[0].(stackitem.Null); ok {
- return nil, ErrConfigNotFound
- }
-
- return assert(items[0])
-}
-
-// IntegerAssert converts stack item to int64.
-func IntegerAssert(item stackitem.Item) (any, error) {
- return client.IntFromStackItem(item)
-}
-
-// StringAssert converts stack item to string.
-func StringAssert(item stackitem.Item) (any, error) {
- return client.StringFromStackItem(item)
-}
-
-// BoolAssert converts stack item to bool.
-func BoolAssert(item stackitem.Item) (any, error) {
- return client.BoolFromStackItem(item)
+ return items[0], nil
}
// iterateRecords iterates over all config records and passes them to f.
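The assert-callback machinery (IntegerAssert/BoolAssert/StringAssert) collapses into returning the raw stack item and converting at the call site with TryInteger/TryBool. One behavioural nuance worth noting, assuming standard neo-go stack-item semantics: a missing key comes back as stackitem.Null, and Null converts to bool false but does not convert to an integer, so readBoolConfig keeps the old missing-key-means-false behaviour while readUInt64Config now returns an error for a missing key where it previously returned ErrConfigNotFound. A runnable sketch of those conversions:

```go
package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

func main() {
	null := stackitem.Null{}

	b, err := null.TryBool() // false, <nil>: missing bool keys still read as false
	fmt.Println(b, err)

	_, err = null.TryInteger() // conversion error: missing integer keys now fail
	fmt.Println(err)
}
```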
diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go
index 92d569ae2..8561329ec 100644
--- a/pkg/morph/client/netmap/epoch.go
+++ b/pkg/morph/client/netmap/epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,13 +9,13 @@ import (
// Epoch receives number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) Epoch() (uint64, error) {
+func (c *Client) Epoch(ctx context.Context) (uint64, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(epochMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w",
+ return 0, fmt.Errorf("test invoke (%s): %w",
epochMethod, err)
}
@@ -25,20 +26,20 @@ func (c *Client) Epoch() (uint64, error) {
num, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err)
+ return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err)
}
return uint64(num), nil
}
// LastEpochBlock receives block number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) LastEpochBlock() (uint32, error) {
+func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(lastEpochBlockMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w",
+ return 0, fmt.Errorf("test invoke (%s): %w",
lastEpochBlockMethod, err)
}
@@ -49,7 +50,7 @@ func (c *Client) LastEpochBlock() (uint32, error) {
block, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("could not get number from stack item (%s): %w",
+ return 0, fmt.Errorf("get number from stack item (%s): %w",
lastEpochBlockMethod, err)
}
return uint32(block), nil
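A hypothetical call site, showing what the threaded context buys: read-only invocations can now be bounded or cancelled by the caller.

```go
func currentEpoch(ctx context.Context, nm *Client) (uint64, error) {
	// Bound the RPC round-trip; cancellation propagates into TestInvoke.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	return nm.Epoch(ctx)
}
```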
diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go
index d6f8c56b2..0e1f9186b 100644
--- a/pkg/morph/client/netmap/innerring.go
+++ b/pkg/morph/client/netmap/innerring.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"crypto/elliptic"
"fmt"
@@ -23,7 +24,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) {
}
// UpdateInnerRing updates inner ring keys.
-func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
+func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
args := make([][]byte, len(p.keys))
for i := range args {
args[i] = p.keys[i].Bytes()
@@ -34,18 +35,18 @@ func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
prm.SetArgs(args)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
// GetInnerRingList return current IR list.
-func (c *Client) GetInnerRingList() (keys.PublicKeys, error) {
+func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(innerRingListMethod)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err)
}
return irKeysFromStackItem(prms, innerRingListMethod)
@@ -58,7 +59,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys
irs, err := client.ArrayFromStackItem(stack[0])
if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
+ return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err)
}
irKeys := make(keys.PublicKeys, len(irs))
@@ -78,7 +79,7 @@ const irNodeFixedPrmNumber = 1
func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
prms, err := client.ArrayFromStackItem(prm)
if err != nil {
- return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err)
+ return nil, fmt.Errorf("get stack item array (IRNode): %w", err)
} else if ln := len(prms); ln != irNodeFixedPrmNumber {
return nil, fmt.Errorf(
"unexpected stack item count (IRNode): expected %d, has %d",
@@ -89,7 +90,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
byteKey, err := client.BytesFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err)
+ return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err)
}
return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256())
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index f7b5c3ba4..97782fc25 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
@@ -11,14 +12,14 @@ import (
// GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and
// decodes netmap.NetMap from the response.
-func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(epochSnapshotMethod)
invokePrm.SetArgs(epoch)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
epochSnapshotMethod, err)
}
@@ -34,13 +35,13 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
// GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo
// from the response.
-func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
+func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapCandidatesMethod)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err)
}
if len(res) > 0 {
@@ -51,13 +52,13 @@ func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
}
// NetMap calls "netmap" method and decode netmap.NetMap from the response.
-func (c *Client) NetMap() (*netmap.NetMap, error) {
+func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapMethod)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
netMapMethod, err)
}
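The same context now threads through multi-call flows; a hypothetical helper pairing Epoch with GetNetMapByEpoch under one deadline:

```go
func netMapAtCurrentEpoch(ctx context.Context, nm *Client) (*netmap.NetMap, error) {
	epoch, err := nm.Epoch(ctx)
	if err != nil {
		return nil, fmt.Errorf("get epoch: %w", err)
	}
	return nm.GetNetMapByEpoch(ctx, epoch)
}
```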
diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go
index ded386c86..341b20935 100644
--- a/pkg/morph/client/netmap/new_epoch.go
+++ b/pkg/morph/client/netmap/new_epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,14 +9,14 @@ import (
// NewEpoch updates FrostFS epoch number through
// Netmap contract call.
-func (c *Client) NewEpoch(epoch uint64) error {
+func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
+ return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
}
return nil
}
@@ -24,16 +25,16 @@ func (c *Client) NewEpoch(epoch uint64) error {
// control notary transaction internally to ensure all
// nodes produce the same transaction with high probability.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) NewEpochControl(epoch uint64, vub uint32) (uint32, error) {
+func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
prm.SetControlTX(true)
prm.SetVUB(vub)
- res, err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
+ return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
}
return res.VUB, nil
}
diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go
index 764bbc899..e83acde39 100644
--- a/pkg/morph/client/netmap/peer.go
+++ b/pkg/morph/client/netmap/peer.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"errors"
"fmt"
@@ -24,7 +25,7 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
// AddPeer registers peer in FrostFS network through
// Netmap contract call.
-func (c *Client) AddPeer(p AddPeerPrm) error {
+func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
method := addPeerMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -39,15 +40,15 @@ func (c *Client) AddPeer(p AddPeerPrm) error {
prm.SetArgs(p.nodeInfo.Marshal())
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", method, err)
+ if _, err := c.client.Invoke(ctx, prm); err != nil {
+ return fmt.Errorf("invoke method (%s): %w", method, err)
}
return nil
}
// ForceRemovePeer marks the given peer as offline via a notary control transaction.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
+func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
if !c.client.WithNotary() {
return 0, errFailedToRemovePeerWithoutNotary
}
@@ -57,9 +58,9 @@ func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32,
prm.SetControlTX(true)
prm.SetVUB(vub)
- vub, err := c.UpdatePeerState(prm)
+ res, err := c.UpdatePeerState(ctx, prm)
if err != nil {
return 0, fmt.Errorf("updating peer state: %v", err)
}
- return vub, nil
+ return res.VUB, nil
}
diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go
index ba2c26af7..9dbec1a90 100644
--- a/pkg/morph/client/netmap/snapshot.go
+++ b/pkg/morph/client/netmap/snapshot.go
@@ -1,19 +1,22 @@
package netmap
import (
+ "context"
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
// GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response.
-func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(snapshotMethod)
prm.SetArgs(diff)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err)
}
return DecodeNetMap(res)
diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go
index 7c3a4e8cd..f9f639c19 100644
--- a/pkg/morph/client/netmap/update_state.go
+++ b/pkg/morph/client/netmap/update_state.go
@@ -1,7 +1,7 @@
package netmap
import (
- "fmt"
+ "context"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -36,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() {
}
// UpdatePeerState changes peer status through Netmap contract call.
-func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
+func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) {
method := updateStateMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -55,9 +55,5 @@ func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
prm.SetArgs(int64(p.state), p.key)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(prm)
- if err != nil {
- return 0, fmt.Errorf("could not invoke smart contract: %w", err)
- }
- return res.VUB, nil
+ return c.client.Invoke(ctx, prm)
}
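UpdatePeerState no longer unwraps the invocation result itself; callers pick what they need from client.InvokeRes, as ForceRemovePeer above now does with res.VUB. A hypothetical caller keeping both fields (SetKey is assumed from the p.key field passed to SetArgs):

```go
func setOffline(ctx context.Context, nm *Client, key []byte) (util.Uint256, uint32, error) {
	var prm UpdatePeerPrm
	prm.SetKey(key)
	res, err := nm.UpdatePeerState(ctx, prm)
	if err != nil {
		return util.Uint256{}, 0, err
	}
	// Hash and VUB can feed a tx waiter (see pkg/morph/client/waiter.go below).
	return res.Hash, res.VUB, nil
}
```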
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index 218f7ad8e..bc00eb889 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -8,14 +8,12 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const (
@@ -37,12 +35,8 @@ const (
NNSPolicyContractName = "policy.frostfs"
)
-var (
- // ErrNNSRecordNotFound means that there is no such record in NNS contract.
- ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
-
- errEmptyResultStack = errors.New("returned result stack is empty")
-)
+// ErrNNSRecordNotFound means that there is no such record in NNS contract.
+var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
@@ -61,97 +55,36 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {
return util.Uint160{}, ErrConnectionLost
}
- nnsHash, err := c.NNSHash()
- if err != nil {
- return util.Uint160{}, err
- }
-
- sh, err = nnsResolve(c.client, nnsHash, name)
+ sh, err = nnsResolve(c.nnsReader, name)
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
return sh, nil
}
-// NNSHash returns NNS contract hash.
-func (c *Client) NNSHash() (util.Uint160, error) {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- if c.inactive {
- return util.Uint160{}, ErrConnectionLost
- }
-
- success := false
- startedAt := time.Now()
-
- defer func() {
- c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt))
- }()
-
- nnsHash := c.cache.nns()
-
- if nnsHash == nil {
- cs, err := c.client.GetContractStateByID(nnsContractID)
- if err != nil {
- return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
- }
-
- c.cache.setNNSHash(cs.Hash)
- nnsHash = &cs.Hash
- }
- success = true
- return *nnsHash, nil
-}
-
-func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
- found, err := exists(c, nnsHash, domain)
+func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
+ available, err := r.IsAvailable(domain)
if err != nil {
- return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err)
+ return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
}
- if !found {
+ if available {
return nil, ErrNNSRecordNotFound
}
- result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
- {
- Type: smartcontract.StringType,
- Value: domain,
- },
- {
- Type: smartcontract.IntegerType,
- Value: big.NewInt(int64(nns.TXT)),
- },
- }, nil)
- if err != nil {
- return nil, err
- }
- if result.State != vmstate.Halt.String() {
- return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
- }
- if len(result.Stack) == 0 {
- return nil, errEmptyResultStack
- }
- return result.Stack[0], nil
+ return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
}
-func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
- res, err := nnsResolveItem(c, nnsHash, domain)
+func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
+ arr, err := nnsResolveItem(r, domain)
if err != nil {
return util.Uint160{}, err
}
- // Parse the result of resolving NNS record.
- // It works with multiple formats (corresponding to multiple NNS versions).
- // If array of hashes is provided, it returns only the first one.
- if arr, ok := res.Value().([]stackitem.Item); ok {
- if len(arr) == 0 {
- return util.Uint160{}, errors.New("NNS record is missing")
- }
- res = arr[0]
+ if len(arr) == 0 {
+ return util.Uint160{}, errors.New("NNS record is missing")
}
- bs, err := res.TryBytes()
+ bs, err := arr[0].TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
@@ -171,33 +104,6 @@ func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (uti
return util.Uint160{}, errors.New("no valid hashes are found")
}
-func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
- result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
- {
- Type: smartcontract.StringType,
- Value: domain,
- },
- }, nil)
- if err != nil {
- return false, err
- }
-
- if len(result.Stack) == 0 {
- return false, errEmptyResultStack
- }
-
- res := result.Stack[0]
-
- available, err := res.TryBool()
- if err != nil {
- return false, fmt.Errorf("malformed response: %w", err)
- }
-
- // not available means that it is taken
- // and, therefore, exists
- return !available, nil
-}
-
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
@@ -241,18 +147,12 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
return gKey, nil
}
- nnsHash, err := c.NNSHash()
+ arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
if err != nil {
return nil, err
}
- item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
- if err != nil {
- return nil, err
- }
-
- arr, ok := item.Value().([]stackitem.Item)
- if !ok || len(arr) == 0 {
+ if len(arr) == 0 {
return nil, errors.New("NNS record is missing")
}
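The diff only shows the new nnsReader being used; a sketch of how it is presumably wired up (NewReader and the invoker package follow neo-go binding conventions and are assumptions here, not part of this patch):

```go
import (
	nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

func newNNSReader(ws *rpcclient.WSClient, nnsHash util.Uint160) *nnsClient.ContractReader {
	// Note the inverted check above: IsAvailable == true means the domain is
	// free, i.e. no record exists -- the removed exists() helper returned !available.
	return nnsClient.NewReader(invoker.New(ws, nil), nnsHash)
}
```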
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 2a500b31b..448702613 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -1,6 +1,7 @@
package client
import (
+ "context"
"crypto/elliptic"
"encoding/binary"
"errors"
@@ -37,8 +38,7 @@ type (
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
- notary util.Uint160
- proxy util.Uint160
+ proxy util.Uint160
}
notaryCfg struct {
@@ -57,16 +57,11 @@ const (
defaultNotaryValidTime = 50
defaultNotaryRoundTime = 100
- notaryBalanceOfMethod = "balanceOf"
- notaryExpirationOfMethod = "expirationOf"
- setDesignateMethod = "designateAsRole"
+ setDesignateMethod = "designateAsRole"
- notaryBalanceErrMsg = "can't fetch notary balance"
notaryNotEnabledPanicMsg = "notary support was not enabled on this client"
)
-var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack")
-
func defaultNotaryConfig(c *Client) *notaryCfg {
return &notaryCfg{
txValidTime: defaultNotaryValidTime,
@@ -106,7 +101,6 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
alphabetSource: cfg.alphabetSource,
- notary: notary.Hash,
}
c.notary = notaryCfg
@@ -140,7 +134,7 @@ func (c *Client) ProbeNotary() (res bool) {
// use this function.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
+func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -154,16 +148,17 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err)
+ return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err)
}
- currentTill, err := c.depositExpirationOf()
+ r := notary.NewReader(c.rpcActor)
+ currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash())
if err != nil {
- return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
+ return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err)
}
- till := max(int64(bc+delta), currentTill)
- res, _, err := c.depositNotary(amount, till)
+ till := max(int64(bc+delta), int64(currentTill))
+ res, _, err := c.depositNotary(ctx, amount, till)
return res, err
}
@@ -172,7 +167,7 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256
// This allows to avoid ValidAfterDeposit failures.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint32, error) {
+func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -185,23 +180,23 @@ func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint3
}
// till value refers to a block height and it is uint32 value in neo-go
- return c.depositNotary(amount, math.MaxUint32)
+ return c.depositNotary(ctx, amount, math.MaxUint32)
}
-func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
+func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
- c.notary.notary,
+ notary.Hash,
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {
if !errors.Is(err, neorpc.ErrAlreadyExists) {
- return util.Uint256{}, 0, fmt.Errorf("can't make notary deposit: %w", err)
+ return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err)
}
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade,
+ c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -209,7 +204,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256,
return util.Uint256{}, 0, nil
}
- c.logger.Info(logs.ClientNotaryDepositInvoke,
+ c.logger.Info(ctx, logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -236,18 +231,10 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) {
sh := c.acc.PrivateKey().PublicKey().GetScriptHash()
- items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh)
+ r := notary.NewReader(c.rpcActor)
+ bigIntDeposit, err := r.BalanceOf(sh)
if err != nil {
- return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)
- }
-
- if len(items) != 1 {
- return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems))
- }
-
- bigIntDeposit, err := items[0].TryInteger()
- if err != nil {
- return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err))
+ return 0, fmt.Errorf("get notary deposit: %w", err)
}
return bigIntDeposit.Int64(), nil
@@ -274,7 +261,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) {
// committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
+func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -288,10 +275,11 @@ func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -322,7 +310,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) {
// Requires committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
+func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -336,10 +324,11 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -355,19 +344,19 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
// Returns valid until block value.
//
// `nonce` and `vub` are used only if notary is enabled.
-func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
+func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return 0, ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(contract, fee, method, args...)
+ return c.Invoke(ctx, contract, fee, method, args...)
}
- return c.notaryInvoke(false, true, contract, nonce, vub, method, args...)
+ return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...)
}
// NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's
@@ -375,19 +364,19 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui
// not expected to be signed by the current node.
//
// Considered to be used by non-IR nodes.
-func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) {
+func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return 0, ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(contract, fee, method, args...)
+ return c.Invoke(ctx, contract, fee, method, args...)
}
- return c.notaryInvoke(false, false, contract, rand.Uint32(), vubP, method, args...)
+ return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...)
}
// NotarySignAndInvokeTX signs and sends notary request that was received from
@@ -404,7 +393,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return fmt.Errorf("could not fetch current alphabet keys: %w", err)
+ return fmt.Errorf("fetch current alphabet keys: %w", err)
}
cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList)
@@ -429,7 +418,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return err
}
- c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked,
+ c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked,
zap.String("tx_hash", mainH.StringLE()),
zap.Uint32("valid_until_block", untilActual),
zap.String("fallback_hash", fbH.StringLE()))
@@ -437,13 +426,13 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return nil
}
-func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error {
+func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error {
designate := c.GetDesignateHash()
- _, err := c.notaryInvoke(true, true, designate, nonce, &vub, method, args...)
+ _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...)
return err
}
-func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
+func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
start := time.Now()
success := false
defer func() {
@@ -452,27 +441,27 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
until, err := c.getUntilValue(vub)
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee)
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
nAct, err := notary.NewActor(c.client, cosigners, c.acc)
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != vmstate.Halt.String() {
- return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
+ return &notHaltStateError{state: r.State, exception: r.FaultException}
}
t.ValidUntilBlock = until
@@ -482,17 +471,17 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
}, args...))
if err != nil && !alreadyOnChainError(err) {
- return 0, err
+ return InvokeRes{}, err
}
- c.logger.Debug(logs.ClientNotaryRequestInvoked,
+ c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked,
zap.String("method", method),
zap.Uint32("valid_until_block", untilActual),
zap.String("tx_hash", mainH.StringLE()),
zap.String("fallback_hash", fbH.StringLE()))
success = true
- return until, nil
+ return InvokeRes{Hash: mainH, VUB: until}, nil
}
func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) {
@@ -526,24 +515,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet
if ok {
pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err)
+ return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err)
}
acc = notary.FakeSimpleAccount(pub)
} else {
m, pubsBytes, ok := vm.ParseMultiSigContract(script)
if !ok {
- return nil, errors.New("failed to parse verification script of signer #2: unknown witness type")
+ return nil, errors.New("parse verification script of signer #2: unknown witness type")
}
pubs := make(keys.PublicKeys, len(pubsBytes))
for i := range pubs {
pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err)
+ return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err)
}
}
acc, err = notary.FakeMultisigAccount(m, pubs)
if err != nil {
- return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err)
+ return nil, fmt.Errorf("create fake account for signer #2: %w", err)
}
}
}
@@ -619,8 +608,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey())
err := multisigAccount.ConvertMultisig(m, ir)
if err != nil {
- // wrap error as FrostFS-specific since the call is not related to any client
- return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err))
+ return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err)
}
} else {
// alphabet multisig redeem script is
@@ -628,8 +616,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
// inner ring multiaddress witness
multisigAccount, err = notary.FakeMultisigAccount(m, ir)
if err != nil {
- // wrap error as FrostFS-specific since the call is not related to any client
- return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err))
+ return nil, fmt.Errorf("make inner ring multisig wallet: %w", err)
}
}
@@ -639,7 +626,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
func (c *Client) notaryTxValidationLimit() (uint32, error) {
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return 0, fmt.Errorf("can't get current blockchain height: %w", err)
+ return 0, fmt.Errorf("get current blockchain height: %w", err)
}
minTime := bc + c.notary.txValidTime
@@ -648,24 +635,6 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) {
return rounded, nil
}
-func (c *Client) depositExpirationOf() (int64, error) {
- expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash())
- if err != nil {
- return 0, fmt.Errorf("can't invoke method: %w", err)
- }
-
- if len(expirationRes) != 1 {
- return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes))
- }
-
- currentTillBig, err := expirationRes[0].TryInteger()
- if err != nil {
- return 0, fmt.Errorf("can't parse deposit till value: %w", err)
- }
-
- return currentTillBig.Int64(), nil
-}
-
// sigCount returns the number of required signature.
// For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT).
// If committee is true, returns M as N/2+1.
@@ -739,12 +708,12 @@ func alreadyOnChainError(err error) bool {
func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) {
notaryBalance, err := c.GetNotaryDeposit()
if err != nil {
- return 0, fmt.Errorf("could not get notary balance: %w", err)
+ return 0, fmt.Errorf("get notary balance: %w", err)
}
gasBalance, err := c.GasBalance()
if err != nil {
- return 0, fmt.Errorf("could not get GAS balance: %w", err)
+ return 0, fmt.Errorf("get GAS balance: %w", err)
}
if gasBalance == 0 {
@@ -793,12 +762,12 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool)
if hash != nil {
height, err = c.getTransactionHeight(*hash)
if err != nil {
- return 0, 0, fmt.Errorf("could not get transaction height: %w", err)
+ return 0, 0, fmt.Errorf("get transaction height: %w", err)
}
} else {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- return 0, 0, fmt.Errorf("could not get chain height: %w", err)
+ return 0, 0, fmt.Errorf("get chain height: %w", err)
}
}
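Both notary reads now go through neo-go's native Notary binding instead of hand-rolled TestInvoke calls; a condensed sketch of the pattern (signatures inferred from the calls visible in this diff):

```go
func notaryState(rpcActor *actor.Actor, owner util.Uint160) (int64, uint32, error) {
	r := notary.NewReader(rpcActor)
	bal, err := r.BalanceOf(owner) // *big.Int
	if err != nil {
		return 0, 0, fmt.Errorf("get notary balance: %w", err)
	}
	till, err := r.ExpirationOf(owner) // uint32 block height
	if err != nil {
		return 0, 0, fmt.Errorf("get notary expiration: %w", err)
	}
	return bal.Int64(), till, nil
}
```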
diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go
index dfcf62b83..c4eb120d2 100644
--- a/pkg/morph/client/static.go
+++ b/pkg/morph/client/static.go
@@ -1,8 +1,10 @@
package client
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -128,7 +130,8 @@ func (i *InvokePrmOptional) SetVUB(v uint32) {
}
type InvokeRes struct {
- VUB uint32
+ Hash util.Uint256
+ VUB uint32
}
// Invoke calls Invoke method of Client with static internal script hash and fee.
@@ -140,9 +143,7 @@ type InvokeRes struct {
//
// If fee for the operation executed using specified method is customized, then StaticClient uses it.
// Otherwise, default fee is used.
-func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
- var res InvokeRes
- var err error
+func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) {
var vubP *uint32
if s.tryNotary {
if s.alpha {
@@ -159,7 +160,7 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash)
}
if err != nil {
- return InvokeRes{}, fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err)
+ return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err)
}
vubP = &vub
@@ -169,25 +170,23 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
vubP = &prm.vub
}
- res.VUB, err = s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
- return res, err
+ return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
}
if prm.vub > 0 {
vubP = &prm.vub
}
- res.VUB, err = s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
- return res, err
+ return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
}
- res.VUB, err = s.client.Invoke(
+ return s.client.Invoke(
+ ctx,
s.scScriptHash,
s.fee,
prm.method,
prm.args...,
)
- return res, err
}
// TestInvokePrm groups parameters of the TestInvoke operation.
@@ -207,7 +206,9 @@ func (ti *TestInvokePrm) SetArgs(args ...any) {
}
// TestInvoke calls TestInvoke method of Client with static internal script hash.
-func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) {
+func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
+ defer span.End()
return s.client.TestInvoke(
s.scScriptHash,
prm.method,
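A hypothetical call site for the traced read path; the span name makes per-method latency visible in traces without any change at the caller:

```go
func readOnlyCall(ctx context.Context, s StaticClient, method string, args ...any) ([]stackitem.Item, error) {
	prm := TestInvokePrm{}
	prm.SetMethod(method)
	prm.SetArgs(args...)
	// StaticClient now opens a span named "Morph.TestInvoke.<method>" around the RPC.
	return s.TestInvoke(ctx, prm)
}
```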
diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go
index cd55d6bd2..f7b6705a8 100644
--- a/pkg/morph/client/util.go
+++ b/pkg/morph/client/util.go
@@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
case stackitem.IntegerT:
n, err := param.TryInteger()
if err != nil {
- return nil, fmt.Errorf("can't parse integer bytes: %w", err)
+ return nil, fmt.Errorf("parse integer bytes: %w", err)
}
return n.Bytes(), nil
@@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) {
func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
return func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != HaltState {
- return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
+ return &notHaltStateError{state: r.State, exception: r.FaultException}
}
t.SystemFee += add
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
new file mode 100644
index 000000000..87fcf84b8
--- /dev/null
+++ b/pkg/morph/client/waiter.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
+)
+
+type waiterClient struct {
+ c *Client
+}
+
+func (w *waiterClient) Context() context.Context {
+ return context.Background()
+}
+
+func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
+ return w.c.GetApplicationLog(hash, trig)
+}
+
+func (w *waiterClient) GetBlockCount() (uint32, error) {
+ return w.c.BlockCount()
+}
+
+func (w *waiterClient) GetVersion() (*result.Version, error) {
+ return w.c.GetVersion()
+}
+
+// WaitTxHalt waits until transaction with the specified hash persists on the blockchain.
+// It also checks execution result to finish in HALT state.
+func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
+ w, err := waiter.NewPollingBased(&waiterClient{c: c})
+ if err != nil {
+ return fmt.Errorf("create tx waiter: %w", err)
+ }
+
+ res, err := w.WaitAny(ctx, vub, h)
+ if err != nil {
+ return fmt.Errorf("wait until tx persists: %w", err)
+ }
+
+ if res.VMState.HasFlag(vmstate.Halt) {
+ return nil
+ }
+ return &notHaltStateError{state: res.VMState.String(), exception: res.FaultException}
+}
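Together with the Hash field added to InvokeRes in static.go, the waiter enables an invoke-then-confirm pattern; a plausible sketch (the helper name is an assumption):

```go
func invokeAndConfirm(ctx context.Context, c *Client, s StaticClient, prm InvokePrm) error {
	res, err := s.Invoke(ctx, prm)
	if err != nil {
		return fmt.Errorf("invoke: %w", err)
	}
	// Poll until the tx persists (or its VUB passes) and require HALT state.
	return c.WaitTxHalt(ctx, res.VUB, res.Hash)
}
```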
diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go
index 062a2a886..99f80584a 100644
--- a/pkg/morph/event/balance/lock.go
+++ b/pkg/morph/event/balance/lock.go
@@ -3,7 +3,7 @@ package balance
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -47,61 +47,17 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash }
// ParseLock from notification into lock structure.
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Lock
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var le balance.LockEvent
+ if err := le.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
}
- if ln := len(params); ln != 5 {
- return nil, event.WrongNumberOfParameters(5, ln)
- }
-
- // parse id
- ev.id, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get lock id: %w", err)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get lock user value: %w", err)
- }
-
- ev.user, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err)
- }
-
- // parse lock account
- lock, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get lock account value: %w", err)
- }
-
- ev.lock, err = util.Uint160DecodeBytesBE(lock)
- if err != nil {
- return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err)
- }
-
- // parse amount
- ev.amount, err = client.IntFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get lock amount: %w", err)
- }
-
- // parse until deadline
- ev.until, err = client.IntFromStackItem(params[4])
- if err != nil {
- return nil, fmt.Errorf("could not get lock deadline: %w", err)
- }
-
- ev.txHash = e.Container
-
- return ev, nil
+ return Lock{
+ id: le.TxID,
+ user: le.From,
+ lock: le.To,
+ amount: le.Amount.Int64(),
+ until: le.Until.Int64(),
+ txHash: e.Container,
+ }, nil
}
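The remaining event parsers below follow the same template: let the generated binding's FromStackItem do the arity and type checks, then map fields. A test-style sketch of the new path (field order per the mapping above: TxID, From, To, Amount, Until; createNotifyEventFromItems is the existing test helper):

```go
func TestParseLockSketch(t *testing.T) {
	from := util.Uint160{0x1}
	lock := util.Uint160{0x2}
	ev, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{
		stackitem.NewByteArray([]byte("tx id")),
		stackitem.NewByteArray(from.BytesBE()),
		stackitem.NewByteArray(lock.BytesBE()),
		stackitem.NewBigInteger(big.NewInt(100)),
		stackitem.NewBigInteger(big.NewInt(10)),
	}))
	require.NoError(t, err)
	_, ok := ev.(Lock)
	require.True(t, ok)
}
```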
diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go
index 9199bcd55..87b91aede 100644
--- a/pkg/morph/event/balance/lock_test.go
+++ b/pkg/morph/event/balance/lock_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -28,7 +27,7 @@ func TestParseLock(t *testing.T) {
}
_, err := ParseLock(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go
index a206307f8..d28f6d521 100644
--- a/pkg/morph/event/container/delete.go
+++ b/pkg/morph/event/container/delete.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -58,28 +58,14 @@ func (DeleteSuccess) MorphEvent() {}
// ParseDeleteSuccess decodes notification event thrown by Container contract into
// DeleteSuccess and returns it as event.Event.
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
+ var dse container.DeleteSuccessEvent
+ if err := dse.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err)
}
- const expectedItemNumDeleteSuccess = 1
-
- if ln := len(items); ln != expectedItemNumDeleteSuccess {
- return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln)
- }
-
- binID, err := client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("parse container ID item: %w", err)
- }
-
- var res DeleteSuccess
-
- err = res.ID.Decode(binID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- return res, nil
+ var cnr cid.ID
+ cnr.SetSHA256(dse.ContainerID)
+ return DeleteSuccess{
+ ID: cnr,
+ }, nil
}
diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go
index 627c5fcf5..62e7d7277 100644
--- a/pkg/morph/event/container/delete_test.go
+++ b/pkg/morph/event/container/delete_test.go
@@ -4,7 +4,6 @@ import (
"crypto/sha256"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -18,7 +17,7 @@ func TestParseDeleteSuccess(t *testing.T) {
}
_, err := ParseDeleteSuccess(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong container parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go
index 335034bf3..b09394ba4 100644
--- a/pkg/morph/event/container/put.go
+++ b/pkg/morph/event/container/put.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -78,33 +78,14 @@ func (PutSuccess) MorphEvent() {}
// ParsePutSuccess decodes notification event thrown by Container contract into
// PutSuccess and returns it as event.Event.
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
+ var pse container.PutSuccessEvent
+ if err := pse.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err)
}
- const expectedItemNumPutSuccess = 2
-
- if ln := len(items); ln != expectedItemNumPutSuccess {
- return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln)
- }
-
- binID, err := client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("parse container ID item: %w", err)
- }
-
- _, err = client.BytesFromStackItem(items[1])
- if err != nil {
- return nil, fmt.Errorf("parse public key item: %w", err)
- }
-
- var res PutSuccess
-
- err = res.ID.Decode(binID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- return res, nil
+ var cnr cid.ID
+ cnr.SetSHA256(pse.ContainerID)
+ return PutSuccess{
+ ID: cnr,
+ }, nil
}
diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go
index 3622f9943..dd5c7ea93 100644
--- a/pkg/morph/event/container/put_test.go
+++ b/pkg/morph/event/container/put_test.go
@@ -4,8 +4,8 @@ import (
"crypto/sha256"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) {
}
_, err := ParsePutSuccess(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong container ID parameter", func(t *testing.T) {
@@ -35,18 +35,30 @@ func TestParsePutSuccess(t *testing.T) {
id.Encode(binID)
t.Run("wrong public key parameter", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewMap(),
- }))
+ t.Run("wrong type", func(t *testing.T) {
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewMap(),
+ }))
- require.Error(t, err)
+ require.Error(t, err)
+ })
+ t.Run("garbage data", func(t *testing.T) {
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewByteArray([]byte("key")),
+ }))
+ require.Error(t, err)
+ })
})
t.Run("correct behavior", func(t *testing.T) {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(binID),
- stackitem.NewByteArray([]byte("key")),
+ stackitem.NewByteArray(pk.PublicKey().Bytes()),
}))
require.NoError(t, err)
diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go
index eae2a23f5..cf56464b8 100644
--- a/pkg/morph/event/frostfs/cheque.go
+++ b/pkg/morph/event/frostfs/cheque.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,53 +34,20 @@ func (c Cheque) LockAccount() util.Uint160 { return c.LockValue }
// ParseCheque from notification into cheque structure.
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Cheque
- err error
- )
+ var ce frostfs.ChequeEvent
+ if err := ce.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err)
+ }
- params, err := event.ParseStackArray(e)
+ lock, err := util.Uint160DecodeBytesBE(ce.LockAccount)
if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err)
}
- if ln := len(params); ln != 4 {
- return nil, event.WrongNumberOfParameters(4, ln)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque id: %w", err)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque user: %w", err)
- }
-
- ev.UserValue, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err)
- }
-
- // parse amount
- ev.AmountValue, err = client.IntFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque amount: %w", err)
- }
-
- // parse lock account
- lock, err := client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque lock account: %w", err)
- }
-
- ev.LockValue, err = util.Uint160DecodeBytesBE(lock)
- if err != nil {
- return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err)
- }
-
- return ev, nil
+ return Cheque{
+ IDValue: ce.Id,
+ AmountValue: ce.Amount.Int64(),
+ UserValue: ce.User,
+ LockValue: lock,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go
index ab177757f..d92b7922b 100644
--- a/pkg/morph/event/frostfs/cheque_test.go
+++ b/pkg/morph/event/frostfs/cheque_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -27,7 +26,7 @@ func TestParseCheque(t *testing.T) {
}
_, err := ParseCheque(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go
index 4c87634c2..805e80f3c 100644
--- a/pkg/morph/event/frostfs/config.go
+++ b/pkg/morph/event/frostfs/config.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -36,39 +36,15 @@ func (u Config) Key() []byte { return u.KeyValue }
func (u Config) Value() []byte { return u.ValueValue }
func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Config
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var sce frostfs.SetConfigEvent
+ if err := sce.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err)
}
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get config update id: %w", err)
- }
-
- // parse key
- ev.KeyValue, err = client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get config key: %w", err)
- }
-
- // parse value
- ev.ValueValue, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get config value: %w", err)
- }
-
- ev.TxHashValue = e.Container
-
- return ev, nil
+ return Config{
+ KeyValue: sce.Key,
+ ValueValue: sce.Value,
+ IDValue: sce.Id,
+ TxHashValue: e.Container,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go
index dcd4201e4..8acc8c15c 100644
--- a/pkg/morph/event/frostfs/config_test.go
+++ b/pkg/morph/event/frostfs/config_test.go
@@ -3,7 +3,6 @@ package frostfs
import (
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -21,7 +20,7 @@ func TestParseConfig(t *testing.T) {
}
_, err := ParseConfig(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong first parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go
index d8a3b82f0..fcb01577e 100644
--- a/pkg/morph/event/frostfs/deposit.go
+++ b/pkg/morph/event/frostfs/deposit.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,50 +34,15 @@ func (d Deposit) Amount() int64 { return d.AmountValue }
// ParseDeposit notification into deposit structure.
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ev Deposit
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var de frostfs.DepositEvent
+ if err := de.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err)
}
- if ln := len(params); ln != 4 {
- return nil, event.WrongNumberOfParameters(4, ln)
- }
-
- // parse from
- from, err := client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit sender: %w", err)
- }
-
- ev.FromValue, err = util.Uint160DecodeBytesBE(from)
- if err != nil {
- return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err)
- }
-
- // parse amount
- ev.AmountValue, err = client.IntFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit amount: %w", err)
- }
-
- // parse to
- to, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit receiver: %w", err)
- }
-
- ev.ToValue, err = util.Uint160DecodeBytesBE(to)
- if err != nil {
- return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit id: %w", err)
- }
-
- return ev, nil
+ return Deposit{
+ IDValue: de.TxHash[:],
+ AmountValue: de.Amount.Int64(),
+ FromValue: de.From,
+ ToValue: de.Receiver,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go
index f279a7f9c..38d3e61f6 100644
--- a/pkg/morph/event/frostfs/deposit_test.go
+++ b/pkg/morph/event/frostfs/deposit_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -12,7 +11,7 @@ import (
func TestParseDeposit(t *testing.T) {
var (
- id = []byte("Hello World")
+ id = util.Uint256{0, 1, 2, 3}
from = util.Uint160{0x1, 0x2, 0x3}
to = util.Uint160{0x3, 0x2, 0x1}
@@ -26,7 +25,7 @@ func TestParseDeposit(t *testing.T) {
}
_, err := ParseDeposit(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong from parameter", func(t *testing.T) {
@@ -72,12 +71,12 @@ func TestParseDeposit(t *testing.T) {
stackitem.NewByteArray(from.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
stackitem.NewByteArray(to.BytesBE()),
- stackitem.NewByteArray(id),
+ stackitem.NewByteArray(id[:]),
}))
require.NoError(t, err)
require.Equal(t, Deposit{
- IDValue: id,
+ IDValue: id[:],
AmountValue: amount,
FromValue: from,
ToValue: to,
diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go
deleted file mode 100644
index 62203540f..000000000
--- a/pkg/morph/event/frostfs/ir_update.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package frostfs
-
-import (
- "crypto/elliptic"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type UpdateInnerRing struct {
- keys []*keys.PublicKey
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (UpdateInnerRing) MorphEvent() {}
-
-func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys }
-
-func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) {
- var (
- ev UpdateInnerRing
- err error
- )
-
- if ln := len(params); ln != 1 {
- return nil, event.WrongNumberOfParameters(1, ln)
- }
-
- // parse keys
- irKeys, err := client.ArrayFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get updated inner ring keys: %w", err)
- }
-
- ev.keys = make([]*keys.PublicKey, 0, len(irKeys))
- for i := range irKeys {
- rawKey, err := client.BytesFromStackItem(irKeys[i])
- if err != nil {
- return nil, fmt.Errorf("could not get updated inner ring public key: %w", err)
- }
-
- key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
- if err != nil {
- return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err)
- }
-
- ev.keys = append(ev.keys, key)
- }
-
- return ev, nil
-}
diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go
deleted file mode 100644
index fae87e5f9..000000000
--- a/pkg/morph/event/frostfs/ir_update_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package frostfs
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func genKey(t *testing.T) *keys.PrivateKey {
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
- return priv
-}
-
-func TestParseUpdateInnerRing(t *testing.T) {
- publicKeys := []*keys.PublicKey{
- genKey(t).PublicKey(),
- genKey(t).PublicKey(),
- genKey(t).PublicKey(),
- }
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseUpdateInnerRing(prms)
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong first parameter", func(t *testing.T) {
- _, err := ParseUpdateInnerRing([]stackitem.Item{
- stackitem.NewMap(),
- })
-
- require.Error(t, err)
- })
-
- t.Run("correct", func(t *testing.T) {
- ev, err := ParseUpdateInnerRing([]stackitem.Item{
- stackitem.NewArray([]stackitem.Item{
- stackitem.NewByteArray(publicKeys[0].Bytes()),
- stackitem.NewByteArray(publicKeys[1].Bytes()),
- stackitem.NewByteArray(publicKeys[2].Bytes()),
- }),
- })
- require.NoError(t, err)
-
- require.Equal(t, UpdateInnerRing{
- keys: publicKeys,
- }, ev)
- })
-}
diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go
index f48067f86..2568b6512 100644
--- a/pkg/morph/event/frostfs/withdraw.go
+++ b/pkg/morph/event/frostfs/withdraw.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -30,39 +30,14 @@ func (w Withdraw) Amount() int64 { return w.AmountValue }
// ParseWithdraw notification into withdraw structure.
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ev Withdraw
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var we frostfs.WithdrawEvent
+ if err := we.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err)
}
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw user: %w", err)
- }
-
- ev.UserValue, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err)
- }
-
- // parse amount
- ev.AmountValue, err = client.IntFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw amount: %w", err)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw id: %w", err)
- }
-
- return ev, nil
+ return Withdraw{
+ IDValue: we.TxHash[:],
+ AmountValue: we.Amount.Int64(),
+ UserValue: we.User,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go
index 33435d19a..e382305e6 100644
--- a/pkg/morph/event/frostfs/withdraw_test.go
+++ b/pkg/morph/event/frostfs/withdraw_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -12,7 +11,7 @@ import (
func TestParseWithdraw(t *testing.T) {
var (
- id = []byte("Hello World")
+ id = util.Uint256{1, 2, 3}
user = util.Uint160{0x1, 0x2, 0x3}
amount int64 = 10
@@ -25,7 +24,7 @@ func TestParseWithdraw(t *testing.T) {
}
_, err := ParseWithdraw(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong user parameter", func(t *testing.T) {
@@ -59,12 +58,12 @@ func TestParseWithdraw(t *testing.T) {
ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(user.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
- stackitem.NewByteArray(id),
+ stackitem.NewByteArray(id[:]),
}))
require.NoError(t, err)
require.Equal(t, Withdraw{
- IDValue: id,
+ IDValue: id[:],
AmountValue: amount,
UserValue: user,
}, ev)
diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go
index 182b4667e..55a514ff1 100644
--- a/pkg/morph/event/handlers.go
+++ b/pkg/morph/event/handlers.go
@@ -1,32 +1,26 @@
package event
import (
+ "context"
+
"github.com/nspcc-dev/neo-go/pkg/core/block"
+ "github.com/nspcc-dev/neo-go/pkg/util"
)
// Handler is an Event processing function.
-type Handler func(Event)
+type Handler func(context.Context, Event)
// BlockHandler is a chain block processing function.
-type BlockHandler func(*block.Block)
+type BlockHandler func(context.Context, *block.Block)
// NotificationHandlerInfo is a structure that groups
// the parameters of the handler of particular
// contract event.
type NotificationHandlerInfo struct {
- scriptHashWithType
-
- h Handler
-}
-
-// SetHandler is an event handler setter.
-func (s *NotificationHandlerInfo) SetHandler(v Handler) {
- s.h = v
-}
-
-// Handler returns an event handler.
-func (s NotificationHandlerInfo) Handler() Handler {
- return s.h
+ Contract util.Uint160
+ Type Type
+ Parser NotificationParser
+ Handlers []Handler
}
// NotaryHandlerInfo is a structure that groups
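
With Handler now context-aware and NotificationHandlerInfo flattened into
exported fields, registration becomes a single declarative struct carrying the
contract hash, event type, parser and handlers. A sketch of the new call shape,
mirroring the listener test updated later in this patch (contractHash and the
imports are assumed to be in scope):

    l.RegisterNotificationHandler(event.NotificationHandlerInfo{
        Contract: contractHash, // util.Uint160 of the watched contract
        Type:     event.TypeFromString("Deposit"),
        Parser:   frostfsEvent.ParseDeposit,
        Handlers: []event.Handler{
            func(ctx context.Context, e event.Event) {
                d := e.(frostfsEvent.Deposit)
                _ = d // process the deposit; ctx carries cancellation
            },
        },
    })
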
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index dd3c7d216..e5cdfeef7 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -33,13 +33,6 @@ type Listener interface {
// it could not be started.
ListenWithError(context.Context, chan<- error)
- // SetNotificationParser must set the parser of particular contract event.
- //
- // Parser of each event must be set once. All parsers must be set before Listen call.
- //
- // Must ignore nil parsers and all calls after listener has been started.
- SetNotificationParser(NotificationParserInfo)
-
// RegisterNotificationHandler must register the event handler for particular notification event of contract.
//
// The specified handler must be called after each capture and parsing of the event.
@@ -100,8 +93,6 @@ type listener struct {
startOnce, stopOnce sync.Once
- started bool
-
notificationParsers map[scriptHashWithType]NotificationParser
notificationHandlers map[scriptHashWithType][]Handler
@@ -120,7 +111,7 @@ type listener struct {
pool *ants.Pool
}
-const newListenerFailMsg = "could not instantiate Listener"
+const newListenerFailMsg = "instantiate Listener"
var (
errNilLogger = errors.New("nil logger")
@@ -143,11 +134,8 @@ func (l *listener) Listen(ctx context.Context) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
- if err := l.listen(ctx, nil); err != nil {
- l.log.Error(logs.EventCouldNotStartListenToEvents,
- zap.String("error", err.Error()),
- )
- }
+
+ l.listen(ctx, nil)
})
}
@@ -161,26 +149,17 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
- if err := l.listen(ctx, intError); err != nil {
- l.log.Error(logs.EventCouldNotStartListenToEvents,
- zap.String("error", err.Error()),
- )
- l.sendError(ctx, intError, err)
- }
+
+ l.listen(ctx, intError)
})
}
-func (l *listener) listen(ctx context.Context, intError chan<- error) error {
- // mark listener as started
- l.started = true
-
+func (l *listener) listen(ctx context.Context, intError chan<- error) {
subErrCh := make(chan error)
go l.subscribe(subErrCh)
l.listenLoop(ctx, intError, subErrCh)
-
- return nil
}
func (l *listener) subscribe(errCh chan error) {
@@ -192,7 +171,7 @@ func (l *listener) subscribe(errCh chan error) {
// fill the list with the contracts with set event parsers.
l.mtx.RLock()
for hashType := range l.notificationParsers {
- scHash := hashType.ScriptHash()
+ scHash := hashType.Hash
// prevent repetitions
for _, hash := range hashes {
@@ -201,26 +180,26 @@ func (l *listener) subscribe(errCh chan error) {
}
}
- hashes = append(hashes, hashType.ScriptHash())
+ hashes = append(hashes, hashType.Hash)
}
l.mtx.RUnlock()
err := l.subscriber.SubscribeForNotification(hashes...)
if err != nil {
- errCh <- fmt.Errorf("could not subscribe for notifications: %w", err)
+ errCh <- fmt.Errorf("subscribe for notifications: %w", err)
return
}
if len(l.blockHandlers) > 0 {
if err = l.subscriber.BlockNotifications(); err != nil {
- errCh <- fmt.Errorf("could not subscribe for blocks: %w", err)
+ errCh <- fmt.Errorf("subscribe for blocks: %w", err)
return
}
}
if l.listenNotary {
if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
- errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err)
+ errCh <- fmt.Errorf("subscribe for notary requests: %w", err)
return
}
}
@@ -234,7 +213,7 @@ func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error
// in the same routine when shutting down node.
select {
case <-ctx.Done():
- l.log.Info(logs.EventStopEventListenerByContext,
+ l.log.Info(ctx, logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
return false
@@ -251,81 +230,81 @@ loop:
select {
case err := <-subErrCh:
if !l.sendError(ctx, intErr, err) {
- l.log.Error(logs.EventStopEventListenerByError, zap.Error(err))
+ l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
}
break loop
case <-ctx.Done():
- l.log.Info(logs.EventStopEventListenerByContext,
+ l.log.Info(ctx, logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
break loop
case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
- l.log.Warn(logs.EventStopEventListenerByNotificationChannel)
+ l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
break loop
} else if notifyEvent == nil {
- l.log.Warn(logs.EventNilNotificationEventWasCaught)
+ l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
continue loop
}
- l.handleNotifyEvent(notifyEvent)
+ l.handleNotifyEvent(ctx, notifyEvent)
case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
- l.log.Warn(logs.EventStopEventListenerByNotaryChannel)
+ l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
break loop
} else if notaryEvent == nil {
- l.log.Warn(logs.EventNilNotaryEventWasCaught)
+ l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
continue loop
}
- l.handleNotaryEvent(notaryEvent)
+ l.handleNotaryEvent(ctx, notaryEvent)
case b, ok := <-chs.BlockCh:
if !ok {
- l.log.Warn(logs.EventStopEventListenerByBlockChannel)
+ l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
break loop
} else if b == nil {
- l.log.Warn(logs.EventNilBlockWasCaught)
+ l.log.Warn(ctx, logs.EventNilBlockWasCaught)
continue loop
}
- l.handleBlockEvent(b)
+ l.handleBlockEvent(ctx, b)
}
}
}
-func (l *listener) handleBlockEvent(b *block.Block) {
+func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) {
if err := l.pool.Submit(func() {
for i := range l.blockHandlers {
- l.blockHandlers[i](b)
+ l.blockHandlers[i](ctx, b)
}
}); err != nil {
- l.log.Warn(logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
+func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotary(notaryEvent)
+ l.parseAndHandleNotary(ctx, notaryEvent)
}); err != nil {
- l.log.Warn(logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotification(notifyEvent)
+ l.parseAndHandleNotification(ctx, notifyEvent)
}); err != nil {
- l.log.Warn(logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
log := l.log.With(
zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
)
@@ -338,16 +317,14 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
)
// get the event parser
- keyEvent := scriptHashWithType{}
- keyEvent.SetScriptHash(notifyEvent.ScriptHash)
- keyEvent.SetType(typEvent)
+ keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent}
l.mtx.RLock()
parser, ok := l.notificationParsers[keyEvent]
l.mtx.RUnlock()
if !ok {
- log.Debug(logs.EventEventParserNotSet)
+ log.Debug(ctx, logs.EventEventParserNotSet)
return
}
@@ -355,8 +332,8 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
// parse the notification event
event, err := parser(notifyEvent)
if err != nil {
- log.Warn(logs.EventCouldNotParseNotificationEvent,
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
+ zap.Error(err),
)
return
@@ -368,7 +345,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
l.mtx.RUnlock()
if len(handlers) == 0 {
- log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -376,11 +353,11 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
}
for _, handler := range handlers {
- handler(event)
+ handler(ctx, event)
}
}
-func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
+func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) {
// prepare the notary event
notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest)
if err != nil {
@@ -388,14 +365,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
switch {
case errors.Is(err, ErrTXAlreadyHandled):
case errors.As(err, &expErr):
- l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent,
- zap.String("error", err.Error()),
+ l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
+ zap.Error(err),
zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
)
default:
- l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent,
- zap.String("error", err.Error()),
+ l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
+ zap.Error(err),
)
}
@@ -418,7 +395,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Debug(logs.EventNotaryParserNotSet)
+ log.Debug(ctx, logs.EventNotaryParserNotSet)
return
}
@@ -426,8 +403,8 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
// parse the notary event
event, err := parser(notaryEvent)
if err != nil {
- log.Warn(logs.EventCouldNotParseNotaryEvent,
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
+ zap.Error(err),
)
return
@@ -439,47 +416,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
return
}
- handler(event)
-}
-
-// SetNotificationParser sets the parser of particular contract event.
-//
-// Ignores nil and already set parsers.
-// Ignores the parser if listener is started.
-func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
- log := l.log.With(
- zap.String("contract", pi.ScriptHash().StringLE()),
- zap.Stringer("event_type", pi.getType()),
- )
-
- parser := pi.parser()
- if parser == nil {
- log.Info(logs.EventIgnoreNilEventParser)
- return
- }
-
- l.mtx.Lock()
- defer l.mtx.Unlock()
-
- // check if the listener was started
- if l.started {
- log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
- return
- }
-
- // add event parser
- if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok {
- l.notificationParsers[pi.scriptHashWithType] = pi.parser()
- }
-
- log.Debug(logs.EventRegisteredNewEventParser)
+ handler(ctx, event)
}
// RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -488,35 +432,23 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
// Ignores handlers of event without parser.
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
log := l.log.With(
- zap.String("contract", hi.ScriptHash().StringLE()),
- zap.Stringer("event_type", hi.GetType()),
+ zap.String("contract", hi.Contract.StringLE()),
+ zap.Stringer("event_type", hi.Type),
)
- handler := hi.Handler()
- if handler == nil {
- log.Warn(logs.EventIgnoreNilEventHandler)
- return
- }
-
// check if parser was set
- l.mtx.RLock()
- _, ok := l.notificationParsers[hi.scriptHashWithType]
- l.mtx.RUnlock()
-
- if !ok {
- log.Warn(logs.EventIgnoreHandlerOfEventWoParser)
- return
- }
-
- // add event handler
l.mtx.Lock()
- l.notificationHandlers[hi.scriptHashWithType] = append(
- l.notificationHandlers[hi.scriptHashWithType],
- hi.Handler(),
- )
- l.mtx.Unlock()
+ defer l.mtx.Unlock()
- log.Debug(logs.EventRegisteredNewEventHandler)
+ k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type}
+
+ l.notificationParsers[k] = hi.Parser
+ l.notificationHandlers[k] = append(
+ l.notificationHandlers[k],
+ hi.Handlers...,
+ )
+
+ log.Debug(context.Background(), logs.EventRegisteredNewEventHandler)
}
// EnableNotarySupport enables notary request listening. Passed hash is
@@ -555,27 +487,15 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
zap.Stringer("notary_type", pi.RequestType()),
)
- parser := pi.parser()
- if parser == nil {
- log.Info(logs.EventIgnoreNilNotaryEventParser)
- return
- }
-
l.mtx.Lock()
defer l.mtx.Unlock()
- // check if the listener was started
- if l.started {
- log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
- return
- }
-
// add event parser
if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok {
l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
}
- log.Info(logs.EventRegisteredNewEventParser)
+ log.Info(context.Background(), logs.EventRegisteredNewEventParser)
}
// RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -593,19 +513,13 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
zap.Stringer("notary type", hi.RequestType()),
)
- handler := hi.Handler()
- if handler == nil {
- log.Warn(logs.EventIgnoreNilNotaryEventHandler)
- return
- }
-
// check if parser was set
l.mtx.RLock()
_, ok := l.notaryParsers[hi.notaryRequestTypes]
l.mtx.RUnlock()
if !ok {
- log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser)
+ log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser)
return
}
@@ -614,7 +528,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
l.mtx.Unlock()
- log.Info(logs.EventRegisteredNewEventHandler)
+ log.Info(context.Background(), logs.EventRegisteredNewEventHandler)
}
// Stop closes subscription channel with remote neo node.
@@ -627,11 +541,6 @@ func (l *listener) Stop() {
}
func (l *listener) RegisterBlockHandler(handler BlockHandler) {
- if handler == nil {
- l.log.Warn(logs.EventIgnoreNilBlockHandler)
- return
- }
-
l.blockHandlers = append(l.blockHandlers, handler)
}
@@ -648,7 +557,7 @@ func NewListener(p ListenerParams) (Listener, error) {
// The default capacity is 0, which means "infinite".
pool, err := ants.NewPool(p.WorkerPoolCapacity)
if err != nil {
- return nil, fmt.Errorf("could not init worker pool: %w", err)
+ return nil, fmt.Errorf("init worker pool: %w", err)
}
return &listener{
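
Since listen no longer returns an error, startup failures surface only through
the channel handed to ListenWithError. A consuming-side sketch under the new
contract (log and ctx are assumed to be in scope):

    errCh := make(chan error, 1)
    go l.ListenWithError(ctx, errCh)

    select {
    case err := <-errCh:
        // subscription or channel failure reported by the listener
        log.Error(ctx, "event listener stopped", zap.Error(err))
    case <-ctx.Done():
        // normal shutdown; the listener logs the context reason itself
    }
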
diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go
index 5f7cf9f43..87f37305f 100644
--- a/pkg/morph/event/listener_test.go
+++ b/pkg/morph/event/listener_test.go
@@ -34,34 +34,24 @@ func TestEventHandling(t *testing.T) {
blockHandled := make(chan bool)
handledBlocks := make([]*block.Block, 0)
- l.RegisterBlockHandler(func(b *block.Block) {
+ l.RegisterBlockHandler(func(_ context.Context, b *block.Block) {
handledBlocks = append(handledBlocks, b)
blockHandled <- true
})
- key := scriptHashWithType{
- scriptHashValue: scriptHashValue{
- hash: util.Uint160{100},
- },
- typeValue: typeValue{
- typ: TypeFromString("notification type"),
- },
- }
-
- l.SetNotificationParser(NotificationParserInfo{
- scriptHashWithType: key,
- p: func(cne *state.ContainedNotificationEvent) (Event, error) {
- return testNotificationEvent{source: cne}, nil
- },
- })
-
notificationHandled := make(chan bool)
handledNotifications := make([]Event, 0)
l.RegisterNotificationHandler(NotificationHandlerInfo{
- scriptHashWithType: key,
- h: func(e Event) {
- handledNotifications = append(handledNotifications, e)
- notificationHandled <- true
+ Contract: util.Uint160{100},
+ Type: TypeFromString("notification type"),
+ Parser: func(cne *state.ContainedNotificationEvent) (Event, error) {
+ return testNotificationEvent{source: cne}, nil
+ },
+ Handlers: []Handler{
+ func(_ context.Context, e Event) {
+ handledNotifications = append(handledNotifications, e)
+ notificationHandled <- true
+ },
},
})
@@ -137,7 +127,7 @@ func TestErrorPassing(t *testing.T) {
WorkerPoolCapacity: 10,
})
require.NoError(t, err, "failed to create listener")
- l.RegisterBlockHandler(func(b *block.Block) {})
+ l.RegisterBlockHandler(func(context.Context, *block.Block) {})
errCh := make(chan error)
diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go
index e454e2a6a..39c8f6237 100644
--- a/pkg/morph/event/netmap/epoch.go
+++ b/pkg/morph/event/netmap/epoch.go
@@ -1,9 +1,7 @@
package netmap
import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -37,22 +35,13 @@ func (s NewEpoch) TxHash() util.Uint256 {
//
// Result is type of NewEpoch.
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != 1 {
- return nil, event.WrongNumberOfParameters(1, ln)
- }
-
- prmEpochNum, err := client.IntFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get integer epoch number: %w", err)
+ var nee netmap.NewEpochEvent
+ if err := nee.FromStackItem(e.Item); err != nil {
+ return nil, err
}
return NewEpoch{
- Num: uint64(prmEpochNum),
+ Num: nee.Epoch.Uint64(),
Hash: e.Container,
}, nil
}
diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go
index bc267ecb6..6ff692327 100644
--- a/pkg/morph/event/netmap/epoch_test.go
+++ b/pkg/morph/event/netmap/epoch_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +16,7 @@ func TestParseNewEpoch(t *testing.T) {
}
_, err := ParseNewEpoch(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong first parameter type", func(t *testing.T) {
diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go
index 0260810b8..993182ab4 100644
--- a/pkg/morph/event/netmap/update_peer_notary.go
+++ b/pkg/morph/event/netmap/update_peer_notary.go
@@ -10,7 +10,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)
-var errNilPubKey = errors.New("could not parse public key: public key is nil")
+var errNilPubKey = errors.New("public key is nil")
func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
if v == nil {
@@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256())
if err != nil {
- return fmt.Errorf("could not parse public key: %w", err)
+ return fmt.Errorf("parse public key: %w", err)
}
return
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index 37091f768..b11973646 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
for {
opCode, param, err = ctx.Next()
if err != nil {
- return nil, fmt.Errorf("could not get next opcode in script: %w", err)
+ return nil, fmt.Errorf("get next opcode in script: %w", err)
}
if opCode == opcode.RET {
@@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
// retrieve contract's script hash
contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param)
if err != nil {
- return nil, fmt.Errorf("could not decode contract hash: %w", err)
+ return nil, fmt.Errorf("decode contract hash: %w", err)
}
// retrieve contract's method
@@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
if len(args) != 0 {
err = p.validateParameterOpcodes(args)
if err != nil {
- return nil, fmt.Errorf("could not validate arguments: %w", err)
+ return nil, fmt.Errorf("validate arguments: %w", err)
}
// without args packing opcodes
@@ -199,14 +199,14 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
// neo-go API)
//
// this check prevents notary flow recursion
- if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 ||
- bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version
+ if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 &&
+ !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version
return ErrTXAlreadyHandled
}
currentAlphabet, err := p.alphaKeys()
if err != nil {
- return fmt.Errorf("could not fetch Alphabet public keys: %w", err)
+ return fmt.Errorf("fetch Alphabet public keys: %w", err)
}
err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
@@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error {
argsLen, err := IntFromOpcode(ops[l-2])
if err != nil {
- return fmt.Errorf("could not parse argument len: %w", err)
+ return fmt.Errorf("parse argument len: %w", err)
}
err = validateNestedArgs(argsLen, ops[:l-2])
@@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error {
argsLen, err := IntFromOpcode(ops[i-1])
if err != nil {
- return fmt.Errorf("could not parse argument len: %w", err)
+ return fmt.Errorf("parse argument len: %w", err)
}
expArgLen += argsLen + 1
@@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error {
currBlock, err := p.blockCounter.BlockCount()
if err != nil {
- return fmt.Errorf("could not fetch current chain height: %w", err)
+ return fmt.Errorf("fetch current chain height: %w", err)
}
if currBlock >= nvb.Height {
@@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("could not get Alphabet verification script: %w", err)
+ return fmt.Errorf("get Alphabet verification script: %w", err)
}
if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) {
@@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("could not get Alphabet verification script: %w", err)
+ return fmt.Errorf("get Alphabet verification script: %w", err)
}
// the second one must be witness of the current
@@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
// the last one must be a placeholder for notary contract witness
last := len(w) - 1
- if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981
- bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
+ if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981
+ !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
len(w[last].VerificationScript) != 0 {
return errIncorrectNotaryPlaceholder
}
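
The two witness checks rewritten above are pure De Morgan transformations:
!(a || b) becomes !a && !b, with no behavior change. A quick equivalence
check, as a standalone sketch:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        dummy := []byte("dummy-invocation-script")
        for _, inv := range [][]byte{nil, dummy, []byte("already-signed")} {
            before := !(len(inv) == 0 || bytes.Equal(inv, dummy))
            after := len(inv) != 0 && !bytes.Equal(inv, dummy)
            fmt.Println(before == after) // true for every input
        }
    }
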
diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go
index 90eff0bd2..5adeb4b30 100644
--- a/pkg/morph/event/parsers.go
+++ b/pkg/morph/event/parsers.go
@@ -11,15 +11,6 @@ import (
// from the StackItem list.
type NotificationParser func(*state.ContainedNotificationEvent) (Event, error)
-// NotificationParserInfo is a structure that groups
-// the parameters of particular contract
-// notification event parser.
-type NotificationParserInfo struct {
- scriptHashWithType
-
- p NotificationParser
-}
-
// NotaryPreparator constructs NotaryEvent
// from the NotaryRequest event.
type NotaryPreparator interface {
@@ -47,24 +38,6 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) {
n.p = p
}
-// SetParser is an event parser setter.
-func (s *NotificationParserInfo) SetParser(v NotificationParser) {
- s.p = v
-}
-
-func (s NotificationParserInfo) parser() NotificationParser {
- return s.p
-}
-
-// SetType is an event type setter.
-func (s *NotificationParserInfo) SetType(v Type) {
- s.typ = v
-}
-
-func (s NotificationParserInfo) getType() Type {
- return s.typ
-}
-
type wrongPrmNumber struct {
exp, act int
}
diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go
index 28c968046..b384e436b 100644
--- a/pkg/morph/event/rolemanagement/designate.go
+++ b/pkg/morph/event/rolemanagement/designate.go
@@ -26,7 +26,7 @@ func (Designate) MorphEvent() {}
func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) {
params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("parse stack items from notify event: %w", err)
}
if len(params) != 2 {
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index f3b6443fb..0088be400 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -1,6 +1,7 @@
package event
import (
+ "context"
"errors"
"fmt"
@@ -19,13 +20,9 @@ type scriptHashValue struct {
hash util.Uint160
}
-type typeValue struct {
- typ Type
-}
-
type scriptHashWithType struct {
- scriptHashValue
- typeValue
+ Hash util.Uint160
+ Type Type
}
type notaryRequestTypes struct {
@@ -72,25 +69,15 @@ func (s scriptHashValue) ScriptHash() util.Uint160 {
return s.hash
}
-// SetType is an event type setter.
-func (s *typeValue) SetType(v Type) {
- s.typ = v
-}
-
-// GetType is an event type getter.
-func (s typeValue) GetType() Type {
- return s.typ
-}
-
// WorkerPoolHandler sets closure over worker pool w with passed handler h.
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler {
- return func(e Event) {
+ return func(ctx context.Context, e Event) {
err := w.Submit(func() {
- h(e)
+ h(ctx, e)
})
if err != nil {
- log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool,
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool,
+ zap.Error(err),
)
}
}
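
WorkerPoolHandler now threads the caller's context through to the wrapped
handler and to the pool-overflow warning. A minimal usage sketch, assuming a
pool that satisfies the package's WorkerPool interface (Submit(func()) error):

    handler := event.WorkerPoolHandler(pool, func(ctx context.Context, e event.Event) {
        // runs on a pool goroutine with the submitting caller's context
    }, log)

    handler(ctx, someEvent) // a drained pool is logged, not treated as fatal
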
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index ee5466a7d..4ef59ed6a 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -245,16 +245,16 @@ routeloop:
}
func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool {
- s.log.Info(logs.RPConnectionLost)
+ s.log.Info(ctx, logs.RPConnectionLost)
if !s.client.SwitchRPC(ctx) {
- s.log.Error(logs.RPCNodeSwitchFailure)
+ s.log.Error(ctx, logs.RPCNodeSwitchFailure)
return false
}
s.Lock()
chs := newSubChannels()
go func() {
- finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
+ finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
}()
s.current = chs
s.Unlock()
@@ -295,7 +295,7 @@ drainloop:
// restoreSubscriptions restores subscriptions according to
// cached information about them.
-func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent,
+func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent,
blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent,
) bool {
var err error
@@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
if s.subscribedToNewBlocks {
_, err = s.client.ReceiveBlocks(blCh)
if err != nil {
- s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
for contract := range s.subscribedEvents {
_, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
if err != nil {
- s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
for signer := range s.subscribedNotaryEvents {
_, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
if err != nil {
- s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go
index be20d3571..974be1120 100644
--- a/pkg/morph/timer/block.go
+++ b/pkg/morph/timer/block.go
@@ -15,41 +15,19 @@ type BlockTickHandler func()
// It can tick the blocks and perform certain actions
// on block time intervals.
type BlockTimer struct {
- rolledBack bool
-
mtx sync.Mutex
dur BlockMeter
baseDur uint32
- mul, div uint32
-
cur, tgt uint32
last uint32
h BlockTickHandler
- ps []BlockTimer
-
once bool
-
- deltaCfg
-}
-
-// DeltaOption is an option of delta-interval handler.
-type DeltaOption func(*deltaCfg)
-
-type deltaCfg struct {
- pulse bool
-}
-
-// WithPulse returns option to call delta-interval handler multiple times.
-func WithPulse() DeltaOption {
- return func(c *deltaCfg) {
- c.pulse = true
- }
}
// StaticBlockMeter returns BlockMeters that always returns (d, nil).
@@ -65,52 +43,19 @@ func StaticBlockMeter(d uint32) BlockMeter {
func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
- mul: 1,
- div: 1,
h: h,
- deltaCfg: deltaCfg{
- pulse: true,
- },
}
}
// NewOneTickTimer creates a new BlockTimer that ticks only once.
-//
-// Do not use delta handlers with pulse in this timer.
func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
- mul: 1,
- div: 1,
h: h,
once: true,
}
}
-// OnDelta registers handler which is executed on (mul / div * BlockMeter()) block
-// after basic interval reset.
-//
-// If WithPulse option is provided, handler is executed (mul / div * BlockMeter()) block
-// during base interval.
-func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) {
- c := deltaCfg{
- pulse: false,
- }
-
- for i := range opts {
- opts[i](&c)
- }
-
- t.ps = append(t.ps, BlockTimer{
- mul: mul,
- div: div,
- h: h,
- once: t.once,
-
- deltaCfg: c,
- })
-}
-
// Reset resets previous ticks of the BlockTimer.
//
// Returns BlockMeter's error upon occurrence.
@@ -124,29 +69,18 @@ func (t *BlockTimer) Reset() error {
t.resetWithBaseInterval(d)
- for i := range t.ps {
- t.ps[i].resetWithBaseInterval(d)
- }
-
t.mtx.Unlock()
return nil
}
func (t *BlockTimer) resetWithBaseInterval(d uint32) {
- t.rolledBack = false
t.baseDur = d
t.reset()
}
func (t *BlockTimer) reset() {
- mul, div := t.mul, t.div
-
- if !t.pulse && t.rolledBack && mul < div {
- mul, div = 1, 1
- }
-
- delta := mul * t.baseDur / div
+ delta := t.baseDur
if delta == 0 {
delta = 1
}
@@ -180,12 +114,7 @@ func (t *BlockTimer) tick(h uint32) {
if !t.once {
t.cur = 0
- t.rolledBack = true
t.reset()
}
}
-
- for i := range t.ps {
- t.ps[i].tick(h)
- }
}
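
With delta handlers removed, BlockTimer reduces to a single handler firing
every BlockMeter() blocks. A sketch of the remaining surface using the
package's StaticBlockMeter:

    bt := timer.NewBlockTimer(timer.StaticBlockMeter(10), func() {
        // fires once per 10 ticked blocks
    })
    if err := bt.Reset(); err != nil { // reads the meter and arms the timer
        panic(err)
    }
    for h := uint32(1); h <= 30; h++ {
        bt.Tick(h) // handler fires at heights 10, 20 and 30
    }
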
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index ee6091845..a144b3db6 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -1,6 +1,7 @@
package timer_test
import (
+ "errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
@@ -17,7 +18,7 @@ func tickN(t *timer.BlockTimer, n uint32) {
// "resetting" consists of ticking the current height as well and invoking `Reset`.
func TestIRBlockTimer_Reset(t *testing.T) {
var baseCounter [2]int
- blockDur := uint32(3)
+ const blockDur = uint32(3)
bt1 := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
@@ -48,8 +49,40 @@ func TestIRBlockTimer_Reset(t *testing.T) {
require.Equal(t, baseCounter[0], baseCounter[1])
}
+func TestBlockTimer_ResetChangeDuration(t *testing.T) {
+ var dur uint32 = 2
+ var err error
+ var counter int
+
+ bt := timer.NewBlockTimer(
+ func() (uint32, error) { return dur, err },
+ func() { counter++ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 2)
+ require.Equal(t, 1, counter)
+
+ t.Run("return error", func(t *testing.T) {
+ dur = 5
+ err = errors.New("my awesome error")
+ require.ErrorIs(t, bt.Reset(), err)
+
+ tickN(bt, 2)
+ require.Equal(t, 2, counter)
+ })
+ t.Run("change duration", func(t *testing.T) {
+ dur = 5
+ err = nil
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 5)
+ require.Equal(t, 3, counter)
+ })
+}
+
func TestBlockTimer(t *testing.T) {
- blockDur := uint32(10)
+ const blockDur = uint32(10)
baseCallCounter := uint32(0)
bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
@@ -63,85 +96,6 @@ func TestBlockTimer(t *testing.T) {
tickN(bt, intervalNum*blockDur)
require.Equal(t, intervalNum, uint32(baseCallCounter))
-
- // add half-interval handler
- halfCallCounter := uint32(0)
-
- bt.OnDelta(1, 2, func() {
- halfCallCounter++
- })
-
- // add double interval handler
- doubleCallCounter := uint32(0)
-
- bt.OnDelta(2, 1, func() {
- doubleCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- baseCallCounter = 0
- intervalNum = 20
-
- tickN(bt, intervalNum*blockDur)
-
- require.Equal(t, intervalNum, uint32(halfCallCounter))
- require.Equal(t, intervalNum, uint32(baseCallCounter))
- require.Equal(t, intervalNum/2, uint32(doubleCallCounter))
-}
-
-func TestDeltaPulse(t *testing.T) {
- blockDur := uint32(9)
- baseCallCounter := uint32(0)
-
- bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- deltaCallCounter := uint32(0)
-
- div := uint32(3)
-
- bt.OnDelta(1, div, func() {
- deltaCallCounter++
- }, timer.WithPulse())
-
- require.NoError(t, bt.Reset())
-
- intervalNum := uint32(7)
-
- tickN(bt, intervalNum*blockDur)
-
- require.Equal(t, intervalNum, uint32(baseCallCounter))
- require.Equal(t, intervalNum*div, uint32(deltaCallCounter))
-}
-
-func TestDeltaReset(t *testing.T) {
- blockDur := uint32(6)
- baseCallCounter := 0
-
- bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- detlaCallCounter := 0
-
- bt.OnDelta(1, 3, func() {
- detlaCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 6)
-
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 3)
-
- require.Equal(t, 2, detlaCallCounter)
}
func TestNewOneTickTimer(t *testing.T) {
@@ -168,82 +122,51 @@ func TestNewOneTickTimer(t *testing.T) {
tickN(bt, 10)
require.Equal(t, 1, baseCallCounter)
})
-
- t.Run("delta without pulse", func(t *testing.T) {
- blockDur = uint32(10)
- baseCallCounter = 0
-
- bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- detlaCallCounter := 0
-
- bt.OnDelta(1, 10, func() {
- detlaCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 10)
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
-
- tickN(bt, 10) // 10 more ticks must not affect counters
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
- })
}
func TestBlockTimer_TickSameHeight(t *testing.T) {
- var baseCounter, deltaCounter int
+ var baseCounter int
blockDur := uint32(2)
bt := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
func() { baseCounter++ })
- bt.OnDelta(2, 1, func() {
- deltaCounter++
- })
require.NoError(t, bt.Reset())
- check := func(t *testing.T, h uint32, base, delta int) {
+ check := func(t *testing.T, h uint32, base int) {
for range 2 * int(blockDur) {
bt.Tick(h)
require.Equal(t, base, baseCounter)
- require.Equal(t, delta, deltaCounter)
}
}
- check(t, 1, 0, 0)
- check(t, 2, 1, 0)
- check(t, 3, 1, 0)
- check(t, 4, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 1)
+ check(t, 3, 1)
+ check(t, 4, 2)
t.Run("works the same way after `Reset()`", func(t *testing.T) {
t.Run("same block duration", func(t *testing.T) {
require.NoError(t, bt.Reset())
baseCounter = 0
- deltaCounter = 0
- check(t, 1, 0, 0)
- check(t, 2, 1, 0)
- check(t, 3, 1, 0)
- check(t, 4, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 1)
+ check(t, 3, 1)
+ check(t, 4, 2)
})
t.Run("different block duration", func(t *testing.T) {
blockDur = 3
require.NoError(t, bt.Reset())
baseCounter = 0
- deltaCounter = 0
- check(t, 1, 0, 0)
- check(t, 2, 0, 0)
- check(t, 3, 1, 0)
- check(t, 4, 1, 0)
- check(t, 5, 1, 0)
- check(t, 6, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 0)
+ check(t, 3, 1)
+ check(t, 4, 1)
+ check(t, 5, 1)
+ check(t, 6, 2)
})
})
}
diff --git a/pkg/network/address.go b/pkg/network/address.go
index cb83a813d..4643eef15 100644
--- a/pkg/network/address.go
+++ b/pkg/network/address.go
@@ -2,11 +2,11 @@ package network
import (
"errors"
- "fmt"
"net"
"net/url"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
@@ -44,11 +44,9 @@ func (a Address) equal(addr Address) bool {
// See also FromString.
func (a Address) URIAddr() string {
_, host, err := manet.DialArgs(a.ma)
- if err != nil {
- // the only correct way to construct Address is AddressFromString
- // which makes this error appear unexpected
- panic(fmt.Errorf("could not get host addr: %w", err))
- }
+ // the only correct way to construct Address is AddressFromString
+ // which makes this error appear unexpected
+ assert.NoError(err, "could not get host addr")
if !a.IsTLSEnabled() {
return host
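
URIAddr now routes its "impossible" error through internal/assert instead of
an inline panic. The helper itself is not part of this patch; a plausible
shape, stated purely as an assumption:

    // Assumed sketch of internal/assert.NoError; the real helper may differ.
    package assert

    import "fmt"

    // NoError panics with the given message if err is non-nil.
    func NoError(err error, msg string) {
        if err != nil {
            panic(fmt.Sprintf("%s: %v", msg, err))
        }
    }
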
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 481d1ea4a..54c1e18fb 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -7,10 +7,12 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -62,12 +64,16 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address
grpcOpts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
+ qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInteceptor(),
+ tracing.NewUnaryClientInterceptor(),
+ tagging.NewUnaryClientInterceptor(),
),
grpc.WithChainStreamInterceptor(
+ qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
+ tagging.NewStreamClientInterceptor(),
),
grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
@@ -155,7 +161,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
group.IterateAddresses(func(addr network.Address) bool {
select {
case <-ctx.Done():
- firstErr = context.Canceled
+ firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled)
return true
default:
}
diff --git a/pkg/network/group.go b/pkg/network/group.go
index 9843b14d4..0044fb2d4 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -3,6 +3,8 @@ package network
import (
"errors"
"fmt"
+ "iter"
+ "slices"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -67,9 +69,8 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // IterateAddresses must iterate over network addresses and pass each one
- // to the handler until it returns true.
- IterateAddresses(func(string) bool)
+ // Addresses must return an iterator over network addresses.
+ Addresses() iter.Seq[string]
// NumberOfAddresses must return number of addresses in group.
NumberOfAddresses() int
@@ -130,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error {
// iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f
// until 1st parsing failure or f's error.
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) {
- iter.IterateAddresses(func(s string) bool {
+ for s := range iter.Addresses() {
var a Address
err = a.FromString(s)
if err != nil {
- err = fmt.Errorf("could not parse address from string: %w", err)
- return true
+ return fmt.Errorf("could not parse address from string: %w", err)
}
err = f(a)
-
- return err != nil
- })
+ if err != nil {
+ return err
+ }
+ }
return
}
@@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
// at least one common address.
func (x AddressGroup) Intersects(x2 AddressGroup) bool {
for i := range x {
- for j := range x2 {
- if x[i].equal(x2[j]) {
- return true
- }
+ if slices.ContainsFunc(x2, x[i].equal) {
+ return true
}
}
diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go
index 5b335fa52..d08264533 100644
--- a/pkg/network/group_test.go
+++ b/pkg/network/group_test.go
@@ -1,6 +1,8 @@
package network
import (
+ "iter"
+ "slices"
"sort"
"testing"
@@ -58,10 +60,8 @@ func TestAddressGroup_FromIterator(t *testing.T) {
type testIterator []string
-func (t testIterator) IterateAddresses(f func(string) bool) {
- for i := range t {
- f(t[i])
- }
+func (t testIterator) Addresses() iter.Seq[string] {
+ return slices.Values(t)
}
func (t testIterator) NumberOfAddresses() int {
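
The test above implements the new Addresses with slices.Values; for non-slice
sources the push-iterator form works just as well. A sketch with a
hypothetical map-backed type:

    type endpointSet map[string]struct{}

    func (e endpointSet) Addresses() iter.Seq[string] {
        return func(yield func(string) bool) {
            for addr := range e {
                if !yield(addr) { // consumer stopped early
                    return
                }
            }
        }
    }

    func (e endpointSet) NumberOfAddresses() int { return len(e) }
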
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index 49d083a90..8cbf8d9c3 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -80,3 +80,26 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
+
+type containerStreamerV2 struct {
+ containerGRPC.ContainerService_ListStreamServer
+}
+
+func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error {
+ return s.ContainerService_ListStreamServer.Send(
+ resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse),
+ )
+}
+
+// ListStream converts the gRPC ListStreamRequest message, opens the internal
+// server-side stream, and forwards its data to the gRPC stream.
+func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error {
+ listReq := new(container.ListStreamRequest)
+ if err := listReq.FromGRPCMessage(req); err != nil {
+ return err
+ }
+
+ return s.srv.ListStream(listReq, &containerStreamerV2{
+ ContainerService_ListStreamServer: gStream,
+ })
+}
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index fa6252118..15dacd553 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server {
// Patch opens internal Object patch stream and feeds it by the data read from gRPC stream.
func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
- stream, err := s.srv.Patch()
+ stream, err := s.srv.Patch(gStream.Context())
if err != nil {
return err
}
@@ -68,7 +68,7 @@ func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
- stream, err := s.srv.Put()
+ stream, err := s.srv.Put(gStream.Context())
if err != nil {
return err
}
diff --git a/pkg/network/validation.go b/pkg/network/validation.go
index 92f650119..b5157f28f 100644
--- a/pkg/network/validation.go
+++ b/pkg/network/validation.go
@@ -2,6 +2,7 @@ package network
import (
"errors"
+ "iter"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -34,8 +35,8 @@ var (
// MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo
-func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) {
- (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
+func (x NodeEndpointsIterator) Addresses() iter.Seq[string] {
+ return (netmap.NodeInfo)(x).NetworkEndpoints()
}
func (x NodeEndpointsIterator) NumberOfAddresses() int {
diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go
index b77d3e3e6..6c2df8428 100644
--- a/pkg/services/accounting/morph/executor.go
+++ b/pkg/services/accounting/morph/executor.go
@@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
+func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing account")
@@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceReque
return nil, fmt.Errorf("invalid account: %w", err)
}
- amount, err := s.client.BalanceOf(id)
+ amount, err := s.client.BalanceOf(ctx, id)
if err != nil {
return nil, err
}
- balancePrecision, err := s.client.Decimals()
+ balancePrecision, err := s.client.Decimals(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go
index b9bea07fb..61fb025b8 100644
--- a/pkg/services/apemanager/audit.go
+++ b/pkg/services/apemanager/audit.go
@@ -33,7 +33,7 @@ func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainReq
return res, err
}
- audit.LogRequest(a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
res.GetBody().GetChainID()),
@@ -49,7 +49,7 @@ func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChain
return res, err
}
- audit.LogRequest(a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
nil),
@@ -65,7 +65,7 @@ func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveCh
return res, err
}
- audit.LogRequest(a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
req.GetBody().GetChainID()),
diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go
index e64f9a8d1..1d485321c 100644
--- a/pkg/services/apemanager/errors/errors.go
+++ b/pkg/services/apemanager/errors/errors.go
@@ -9,3 +9,9 @@ func ErrAPEManagerAccessDenied(reason string) error {
err.WriteReason(reason)
return err
}
+
+func ErrAPEManagerInvalidArgument(msg string) error {
+ err := new(apistatus.InvalidArgument)
+ err.SetMessage(msg)
+ return err
+}
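
The new constructor lets validation failures travel as an API-level
InvalidArgument status instead of a bare error, which the executor changes
below rely on. A round-trip sketch; the import paths are assumed from the
sibling AccessDenied status:

    package main

    import (
        stderrors "errors"
        "fmt"

        apemanagererr "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors"
        apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    )

    func main() {
        err := apemanagererr.ErrAPEManagerInvalidArgument("invalid CID format: bad checksum")

        var invalid *apistatus.InvalidArgument
        fmt.Println(stderrors.As(err, &invalid)) // true: callers can branch on the status type
    }
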
diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go
index 86f9cb893..fc08fe569 100644
--- a/pkg/services/apemanager/executor.go
+++ b/pkg/services/apemanager/executor.go
@@ -22,6 +22,7 @@ import (
policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/mr-tron/base58/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
@@ -34,6 +35,8 @@ type cfg struct {
type Service struct {
cfg
+ waiter Waiter
+
cnrSrc containercore.Source
contractStorage ape_contract.ProxyAdaptedContractStorage
@@ -41,11 +44,17 @@ type Service struct {
type Option func(*cfg)
-func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service {
+type Waiter interface {
+ WaitTxHalt(context.Context, uint32, util.Uint256) error
+}
+
+func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service {
s := &Service{
cnrSrc: cnrSrc,
contractStorage: contractStorage,
+
+ waiter: waiter,
}
for i := range opts {
@@ -53,7 +62,7 @@ func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedC
}
if s.log == nil {
- s.log = &logger.Logger{Logger: zap.NewNop()}
+ s.log = logger.NewLoggerWrapper(zap.NewNop())
}
return s
@@ -69,12 +78,12 @@ var _ Server = (*Service)(nil)
// validateContainerTargetRequest validates a request for the container target.
// It checks whether the request actor owns the container and denies the request otherwise.
-func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error {
+func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error {
var cidSDK cidSDK.ID
if err := cidSDK.DecodeString(cid); err != nil {
- return fmt.Errorf("invalid CID format: %w", err)
+ return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err))
}
- isOwner, err := s.isActorContainerOwner(cidSDK, pubKey)
+ isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey)
if err != nil {
return fmt.Errorf("failed to check owner: %w", err)
}
@@ -84,7 +93,7 @@ func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.Public
return nil
}
-func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
+func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -92,7 +101,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest)
chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw())
if err != nil {
- return nil, err
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error())
}
if len(chain.ID) == 0 {
const randomIDLength = 10
@@ -108,15 +117,19 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest)
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, fmt.Errorf("unsupported target type: %s", targetType)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
}
- if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil {
+ txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
return nil, err
}
@@ -129,7 +142,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest)
return resp, nil
}
-func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
+func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -140,15 +153,19 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, fmt.Errorf("unsupported target type: %s", targetType)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
}
- if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil {
+ txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID())
+ if err != nil {
+ return nil, err
+ }
+ if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
return nil, err
}
@@ -160,7 +177,7 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe
return resp, nil
}
-func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
+func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -171,12 +188,12 @@ func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequ
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, fmt.Errorf("unsupported target type: %s", targetType)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
}
chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target)
@@ -210,23 +227,23 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK
}
sig := vh.GetBodySignature()
if sig == nil {
- return nil, errEmptyBodySignature
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error())
}
key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("invalid signature key: %w", err)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err))
}
return key, nil
}
-func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
+func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- cnr, err := s.cnrSrc.Get(cid)
+ cnr, err := s.cnrSrc.Get(ctx, cid)
if err != nil {
return false, fmt.Errorf("get container error: %w", err)
}
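
`AddChain` and `RemoveChain` no longer fire-and-forget: they block on the new `Waiter` until the sidechain transaction halts, so a successful response implies the rule chain is persisted. A standalone sketch of the contract (stand-in types; the real waiter polls the chain up to the vub height):

```go
package main

import (
	"context"
	"fmt"
)

// uint256 stands in for util.Uint256 (a transaction hash).
type uint256 [32]byte

// waiter mirrors the Waiter interface introduced above: block until the
// transaction is persisted with HALT state or its vub expires.
type waiter interface {
	WaitTxHalt(ctx context.Context, vub uint32, h uint256) error
}

type instantWaiter struct{}

func (instantWaiter) WaitTxHalt(context.Context, uint32, uint256) error { return nil }

// addChain sketches the new call shape: submit, then wait before replying,
// so a successful response implies the rule chain is actually on chain.
func addChain(ctx context.Context, w waiter) error {
	txHash, vub := submit()
	if err := w.WaitTxHalt(ctx, vub, txHash); err != nil {
		return fmt.Errorf("tx not persisted: %w", err)
	}
	return nil
}

// submit stands in for contractStorage.AddMorphRuleChain, returning the
// transaction hash and its "valid until block" height.
func submit() (uint256, uint32) { return uint256{}, 100 }

func main() {
	fmt.Println(addChain(context.Background(), instantWaiter{}))
}
```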
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
index 278f6da31..eb6263320 100644
--- a/pkg/services/common/ape/checker.go
+++ b/pkg/services/common/ape/checker.go
@@ -1,6 +1,7 @@
package ape
import (
+ "context"
"crypto/ecdsa"
"errors"
"fmt"
@@ -11,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
@@ -20,7 +20,6 @@ import (
)
var (
- errInvalidTargetType = errors.New("bearer token defines non-container target override")
errBearerExpired = errors.New("bearer token has expired")
errBearerInvalidSignature = errors.New("bearer token has invalid signature")
errBearerInvalidContainerID = errors.New("bearer token was created for another container")
@@ -44,15 +43,12 @@ type CheckPrm struct {
// The request's bearer token. It is used in order to check APE overrides with the token.
BearerToken *bearer.Token
-
- // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
- SoftAPECheck bool
}
// CheckCore provides methods to perform the common logic of APE check.
type CheckCore interface {
// CheckAPE performs the common policy-engine check logic on a prepared request.
- CheckAPE(prm CheckPrm) error
+ CheckAPE(ctx context.Context, prm CheckPrm) error
}
type checkerCoreImpl struct {
@@ -74,22 +70,30 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora
}
// CheckAPE performs the common policy-engine check logic on a prepared request.
-func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
+func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
var cr policyengine.ChainRouter
- if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
+ if prm.BearerToken != nil {
var err error
if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
return fmt.Errorf("bearer validation error: %w", err)
}
- cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride())
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
+ if prm.BearerToken.Impersonate() {
+ cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
+ } else {
+ override, isSet := prm.BearerToken.APEOverride()
+ if !isSet {
+ return errors.New("expected APE override within bearer token")
+ }
+ cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override)
+ if err != nil {
+ return fmt.Errorf("create chain router error: %w", err)
+ }
}
} else {
cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
}
- groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey)
+ groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -104,17 +108,10 @@ func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
if err != nil {
return err
}
- if !found && prm.SoftAPECheck || status == apechain.Allow {
+ if found && status == apechain.Allow {
return nil
}
- err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String())
- return apeErr(err)
-}
-
-func apeErr(err error) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(err.Error())
- return errAccessDenied
+ return newChainRouterError(prm.Request.Operation(), status)
}
// isValidBearer checks whether bearer token was correctly signed by authorized
@@ -136,19 +133,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
}
// Check for APE overrides defined in the bearer token.
- apeOverride := token.APEOverride()
- if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
- return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
- }
-
- // Then check if container is either empty or equal to the container in the request.
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !cntID.Equals(targetCnr) {
- return errBearerInvalidContainerID
+ if apeOverride, isSet := token.APEOverride(); isSet {
+ switch apeOverride.Target.TargetType {
+ case ape.TargetTypeContainer:
+ var targetCnr cid.ID
+ err := targetCnr.DecodeString(apeOverride.Target.Name)
+ if err != nil {
+ return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
+ }
+ if !cntID.Equals(targetCnr) {
+ return errBearerInvalidContainerID
+ }
+ default:
+ }
}
// Then check if container owner signed this token.
@@ -160,8 +157,16 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
var usrSender user.ID
user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
+ // Then check that the sender is valid. For an impersonated token the
+ // sender must match the token's issuer rather than the asserted user.
+ if !token.Impersonate() {
+ if !token.AssertUser(usrSender) {
+ return errBearerInvalidOwner
+ }
+ } else {
+ if !bearer.ResolveIssuer(*token).Equals(usrSender) {
+ return errBearerInvalidOwner
+ }
}
return nil
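
Two semantic changes above are easy to miss: impersonated bearer tokens now route through the default chain router instead of being treated as token-less, and a non-impersonated token must carry an APE override. A standalone sketch of that branching (simplified; strings stand in for the router types):

```go
package main

import (
	"errors"
	"fmt"
)

// pickRouter mirrors the new CheckAPE branching: impersonated tokens use
// the default router with local overrides; any other bearer token must
// carry an APE override, which feeds the bearer-specific router.
func pickRouter(hasToken, impersonate, hasOverride bool) (string, error) {
	switch {
	case !hasToken || impersonate:
		return "default+local-overrides", nil
	case hasOverride:
		return "bearer-chain-feed", nil
	default:
		return "", errors.New("expected APE override within bearer token")
	}
}

func main() {
	fmt.Println(pickRouter(true, true, false))  // default+local-overrides <nil>
	fmt.Println(pickRouter(true, false, false)) // error: the override is mandatory
}
```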
diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go
new file mode 100644
index 000000000..d3c381de7
--- /dev/null
+++ b/pkg/services/common/ape/error.go
@@ -0,0 +1,33 @@
+package ape
+
+import (
+ "fmt"
+
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+)
+
+// ChainRouterError is returned when chain router validation prevents
+// the APE request from being processed (no rule found, access denied, etc.).
+type ChainRouterError struct {
+ operation string
+ status apechain.Status
+}
+
+func (e *ChainRouterError) Error() string {
+ return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status())
+}
+
+func (e *ChainRouterError) Operation() string {
+ return e.operation
+}
+
+func (e *ChainRouterError) Status() apechain.Status {
+ return e.status
+}
+
+func newChainRouterError(operation string, status apechain.Status) *ChainRouterError {
+ return &ChainRouterError{
+ operation: operation,
+ status: status,
+ }
+}
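
Callers can now distinguish a policy rejection from infrastructure failures with `errors.As`. A hedged standalone sketch of the caller-side pattern (local mirror of the type above):

```go
package main

import (
	"errors"
	"fmt"
)

// chainRouterError mirrors ape.ChainRouterError defined above.
type chainRouterError struct {
	operation string
	status    string
}

func (e *chainRouterError) Error() string {
	return fmt.Sprintf("access to operation %s is denied by access policy engine: %s",
		e.operation, e.status)
}

func main() {
	var err error = &chainRouterError{operation: "PutObject", status: "AccessDenied"}

	var cre *chainRouterError
	if errors.As(err, &cre) {
		// The service layer converts the typed error into its own API
		// status, e.g. ObjectAccessDenied with the reason set from here.
		fmt.Println("deny:", cre.Error())
	}
}
```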
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 2cdb30b45..3b5dab9aa 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -49,11 +49,11 @@ var (
)
type ir interface {
- InnerRingKeys() ([][]byte, error)
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
}
type containers interface {
- Get(cid.ID) (*containercore.Container, error)
+ Get(context.Context, cid.ID) (*containercore.Container, error)
}
type apeChecker struct {
@@ -106,7 +106,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -116,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
if err != nil {
return nil, err
}
@@ -126,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
}
}
- namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID())
+ namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("could not get owner namespace: %w", err)
}
- if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil {
+ if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
return nil, err
}
@@ -143,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
reqProps,
)
- groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -175,11 +175,84 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
return nil, apeErr(nativeschema.MethodListContainers, s)
}
+func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream")
+ defer span.End()
+
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ if err != nil {
+ return err
+ }
+
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
+ nativeschema.PropertyKeyActorRole: role,
+ }
+
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ if err != nil {
+ return err
+ }
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+
+ namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
+ if err != nil {
+ return fmt.Errorf("could not get owner namespace: %w", err)
+ }
+ if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
+ return err
+ }
+
+ request := aperequest.NewRequest(
+ nativeschema.MethodListContainers,
+ aperequest.NewResource(
+ resourceName(namespace, ""),
+ make(map[string]string),
+ ),
+ reqProps,
+ )
+
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ if err != nil {
+ return fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // The policy contract keeps group-related chains as namespace-group pairs.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetWithNamespace(namespace)
+ rt.User = &policyengine.Target{
+ Type: policyengine.User,
+ Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
+ }
+ rt.Groups = make([]policyengine.Target, len(groups))
+ for i := range groups {
+ rt.Groups[i] = policyengine.GroupTarget(groups[i])
+ }
+
+ s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
+ if err != nil {
+ return err
+ }
+
+ if found && s == apechain.Allow {
+ return ac.next.ListStream(req, stream)
+ }
+
+ return apeErr(nativeschema.MethodListContainers, s)
+}
+
func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -189,7 +262,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
if err != nil {
return nil, err
}
@@ -199,7 +272,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
}
}
- namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID())
+ namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("get namespace error: %w", err)
}
@@ -207,16 +280,21 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, err
}
+ cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer())
+ if err != nil {
+ return nil, fmt.Errorf("get container properties: %w", err)
+ }
+
request := aperequest.NewRequest(
nativeschema.MethodPutContainer,
aperequest.NewResource(
resourceName(namespace, ""),
- make(map[string]string),
+ cnrProps,
),
reqProps,
)
- groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -248,7 +326,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, apeErr(nativeschema.MethodPutContainer, s)
}
-func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
+func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
if vh == nil {
return "", nil, errMissingVerificationHeader
}
@@ -271,7 +349,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.R
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(pkBytes)
+ isIR, err := ac.isInnerRingKey(ctx, pkBytes)
if err != nil {
return "", nil, err
}
@@ -292,7 +370,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
return err
}
- cont, err := ac.reader.Get(id)
+ cont, err := ac.reader.Get(ctx, id)
if err != nil {
return err
}
@@ -308,7 +386,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
namespace = cntNamespace
}
- groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -322,7 +400,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
op,
aperequest.NewResource(
resourceName(namespace, id.EncodeToString()),
- ac.getContainerProps(cont),
+ getContainerProps(cont),
),
reqProps,
)
@@ -372,10 +450,26 @@ func resourceName(namespace string, container string) string {
return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
}
-func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string {
- return map[string]string{
+func getContainerProps(c *containercore.Container) map[string]string {
+ props := map[string]string{
nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
}
+ for attrName, attrVal := range c.Value.Attributes() {
+ name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName)
+ props[name] = attrVal
+ }
+ return props
+}
+
+func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) {
+ if cnrV2 == nil {
+ return nil, errors.New("container is not set")
+ }
+ c := cnrSDK.Container{}
+ if err := c.ReadFromV2(*cnrV2); err != nil {
+ return nil, err
+ }
+ return getContainerProps(&containercore.Container{Value: c}), nil
}
func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
@@ -385,7 +479,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
if err != nil {
return nil, nil, err
}
- role, err := ac.getRole(actor, pk, cont, cnrID)
+ role, err := ac.getRole(ctx, actor, pk, cont, cnrID)
if err != nil {
return nil, nil, err
}
@@ -393,7 +487,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
if err != nil {
return nil, nil, err
}
@@ -405,13 +499,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
return reqProps, pk, nil
}
-func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
+func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
if cont.Value.Owner().Equals(*actor) {
return nativeschema.PropertyValueContainerRoleOwner, nil
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(pkBytes)
+ isIR, err := ac.isInnerRingKey(ctx, pkBytes)
if err != nil {
return "", err
}
@@ -419,7 +513,7 @@ func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containe
return nativeschema.PropertyValueContainerRoleIR, nil
}
- isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont)
+ isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont)
if err != nil {
return "", err
}
@@ -513,8 +607,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) {
- innerRingKeys, err := ac.ir.InnerRingKeys()
+func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) {
+ innerRingKeys, err := ac.ir.InnerRingKeys(ctx)
if err != nil {
return false, err
}
@@ -528,11 +622,11 @@ func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) {
return false, nil
}
-func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
+func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
binCnrID := make([]byte, sha256.Size)
cnrID.Encode(binCnrID)
- nm, err := netmap.GetLatestNetworkMap(ac.nm)
+ nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm)
if err != nil {
return false, err
}
@@ -543,7 +637,7 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor
// then check the previous netmap; this can happen in between epoch changes,
// when a node migrates data from a last-epoch container
- nm, err = netmap.GetPreviousNetworkMap(ac.nm)
+ nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm)
if err != nil {
return false, err
}
@@ -568,7 +662,7 @@ func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containerc
return false
}
-func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -576,24 +670,19 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr, err := ownerSDK.ScriptHash()
- if err != nil {
- return "", err
- }
+ addr := ownerSDK.ScriptHash()
namespace := ""
- subject, err := ac.frostFSIDClient.GetSubject(addr)
+ subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
if err == nil {
namespace = subject.Namespace
- } else {
- if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
- return "", fmt.Errorf("get subject error: %w", err)
- }
+ } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
+ return "", fmt.Errorf("get subject error: %w", err)
}
return namespace, nil
}
-func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -601,11 +690,8 @@ func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error)
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr, err := ownerSDK.ScriptHash()
- if err != nil {
- return "", err
- }
- subject, err := ac.frostFSIDClient.GetSubject(addr)
+ addr := ownerSDK.ScriptHash()
+ subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
if err != nil {
return "", fmt.Errorf("get subject error: %w", err)
}
@@ -639,12 +725,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro
// validateNamespace validates if a namespace of a request actor equals to owner's namespace.
// An actor's namespace is calculated by a public key.
-func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error {
+func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- actorNamespace, err := ac.namespaceByOwner(actorOwnerID)
+ actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID)
if err != nil {
return fmt.Errorf("could not get actor namespace: %w", err)
}
@@ -655,11 +741,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNa
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
+func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
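
`getContainerProps` now exposes every container attribute as an APE resource property, which is what makes attribute-based rules (such as the `__SYSTEM__ZONE` checks in the tests below) possible. A standalone sketch of the expansion; the key strings are assumptions, not the real `nativeschema` constants:

```go
package main

import "fmt"

// Stand-ins for nativeschema.PropertyKeyContainerOwnerID and
// nativeschema.PropertyKeyFormatContainerAttribute (assumed key strings).
const (
	ownerKey      = "native:container/ownerID"
	attrKeyFormat = "native:container/attribute/%s"
)

// containerProps mirrors getContainerProps: the owner plus one resource
// property per container attribute, so APE conditions can match attributes.
func containerProps(owner string, attrs map[string]string) map[string]string {
	props := map[string]string{ownerKey: owner}
	for name, val := range attrs {
		props[fmt.Sprintf(attrKeyFormat, name)] = val
	}
	return props
}

func main() {
	props := containerProps("ownerID", map[string]string{"__SYSTEM__ZONE": "eggplant"})
	fmt.Println(props["native:container/attribute/__SYSTEM__ZONE"]) // eggplant
}
```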
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index b6b42a559..6438c34ca 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -54,6 +54,8 @@ func TestAPE(t *testing.T) {
t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
+ t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr)
+ t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr)
}
const (
@@ -564,6 +566,185 @@ func testDenyGetContainerByIP(t *testing.T) {
require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
}
+func testDenyGetContainerSysZoneAttr(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindResource,
+ Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+ Value: "eggplant",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
+func testDenyPutContainerSysZoneAttr(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+ owner := testContainer.Owner()
+ ownerAddr := owner.ScriptHash()
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ ownerAddr: {},
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ ownerAddr: {},
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodPutContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ nativeschema.ResourceFormatRootContainers,
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindResource,
+ Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+ Value: "eggplant",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initPutRequest(t, testContainer)
+
+ resp, err := apeSrv.Put(ctxWithPeerInfo(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
func testDenyGetContainerByGroupID(t *testing.T) {
t.Parallel()
srv := &srvStub{
@@ -678,8 +859,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
testContainer := containertest.Container()
owner := testContainer.Owner()
- ownerAddr, err := owner.ScriptHash()
- require.NoError(t, err)
+ ownerAddr := owner.ScriptHash()
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
ownerAddr: {},
@@ -690,7 +870,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
nm.currentEpoch = 100
nm.netmaps = map[uint64]*netmap.NetMap{}
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
Rules: []chain.Rule{
{
Status: chain.AccessDenied,
@@ -773,7 +953,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -857,7 +1037,7 @@ func testDenyPutContainerInvalidNamespace(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -1079,6 +1259,11 @@ func (s *srvStub) List(context.Context, *container.ListRequest) (*container.List
return &container.ListResponse{}, nil
}
+func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error {
+ s.calls["ListStream"]++
+ return nil
+}
+
func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) {
s.calls["Put"]++
return &container.PutResponse{}, nil
@@ -1088,7 +1273,7 @@ type irStub struct {
keys [][]byte
}
-func (s *irStub) InnerRingKeys() ([][]byte, error) {
+func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
return s.keys, nil
}
@@ -1096,7 +1281,7 @@ type containerStub struct {
c map[cid.ID]*containercore.Container
}
-func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) {
+func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) {
if v, ok := s.c[id]; ok {
return v, nil
}
@@ -1108,21 +1293,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(s.currentEpoch - diff)
+ return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
}
-func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch() (uint64, error) {
+func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
return s.currentEpoch, nil
}
@@ -1131,7 +1316,7 @@ type frostfsidStub struct {
subjectsExt map[util.Uint160]*client.SubjectExtended
}
-func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) {
+func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) {
s, ok := f.subjects[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1139,7 +1324,7 @@ func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error)
return s, nil
}
-func (f *frostfsidStub) GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) {
s, ok := f.subjectsExt[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1527,26 +1712,21 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put
return req
}
-func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 {
+func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 {
var ownerSDK *user.ID
owner := testContainer.Owner()
ownerSDK = &owner
- sc, err := ownerSDK.ScriptHash()
- require.NoError(t, err)
- return sc
+ return ownerSDK.ScriptHash()
}
func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) {
var actorUserID user.ID
user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey()))
- var err error
- actorScriptHash, err = actorUserID.ScriptHash()
- require.NoError(t, err)
+ actorScriptHash = actorUserID.ScriptHash()
var ownerUserID user.ID
user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey()))
- ownerScriptHash, err = ownerUserID.ScriptHash()
- require.NoError(t, err)
+ ownerScriptHash = ownerUserID.ScriptHash()
require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String())
return
}
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index 03d3dc13d..b235efa3c 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -35,7 +35,7 @@ func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest)
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
@@ -47,7 +47,7 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_Get_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
@@ -58,18 +58,29 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_List_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
return res, err
}
+// ListStream implements Server.
+func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ err := a.next.ListStream(req, stream)
+ if !a.enabled.Load() {
+ return err
+ }
+ audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
+ return err
+}
+
// Put implements Server.
func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
res, err := a.next.Put(ctx, req)
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_Put_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req,
audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index 70234d3de..cdd0d2514 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -14,6 +14,7 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
+ ListStream(context.Context, *container.ListStreamRequest, ListStream) error
}
type executorSvc struct {
@@ -93,3 +94,11 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
s.respSvc.SetMeta(resp)
return resp, nil
}
+
+func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ err := s.exec.ListStream(stream.Context(), req, stream)
+ if err != nil {
+ return fmt.Errorf("could not execute ListStream request: %w", err)
+ }
+ return nil
+}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index adb808af3..eaa608eba 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -25,20 +25,20 @@ type morphExecutor struct {
// Reader is an interface of read-only container storage.
type Reader interface {
containercore.Source
- containercore.EACLSource
// ContainersOf returns a list of container identifiers belonging
// to the specified user of the FrostFS system. It returns the identifiers
// of all FrostFS containers if the owner identifier pointer is nil.
- ContainersOf(*user.ID) ([]cid.ID, error)
+ ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
+ IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
}
// Writer is an interface of container storage updater.
type Writer interface {
// Put stores specified container in the side chain.
- Put(containercore.Container) (*cid.ID, error)
+ Put(context.Context, containercore.Container) (*cid.ID, error)
// Delete removes specified container from the side chain.
- Delete(containercore.RemovalWitness) error
+ Delete(context.Context, containercore.RemovalWitness) error
}
func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
@@ -48,7 +48,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
+func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
sigV2 := body.GetSignature()
if sigV2 == nil {
// TODO(@cthulhu-rider): #468 use "const" error
@@ -81,7 +81,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
}
}
- idCnr, err := s.wrt.Put(cnr)
+ idCnr, err := s.wrt.Put(ctx, cnr)
if err != nil {
return nil, err
}
@@ -95,7 +95,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
return res, nil
}
-func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
+func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -125,7 +125,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *
rmWitness.Signature = body.GetSignature()
rmWitness.SessionToken = tok
- err = s.wrt.Delete(rmWitness)
+ err = s.wrt.Delete(ctx, rmWitness)
if err != nil {
return nil, err
}
@@ -133,7 +133,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *
return new(container.DeleteResponseBody), nil
}
-func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
+func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -146,7 +146,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- cnr, err := s.rdr.Get(id)
+ cnr, err := s.rdr.Get(ctx, id)
if err != nil {
return nil, err
}
@@ -173,7 +173,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (
return res, nil
}
-func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
+func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errMissingUserID
@@ -186,7 +186,7 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody)
return nil, fmt.Errorf("invalid user ID: %w", err)
}
- cnrs, err := s.rdr.ContainersOf(&id)
+ cnrs, err := s.rdr.ContainersOf(ctx, &id)
if err != nil {
return nil, err
}
@@ -201,3 +201,56 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody)
return res, nil
}
+
+func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error {
+ body := req.GetBody()
+ idV2 := body.GetOwnerID()
+ if idV2 == nil {
+ return errMissingUserID
+ }
+
+ var id user.ID
+
+ err := id.ReadFromV2(*idV2)
+ if err != nil {
+ return fmt.Errorf("invalid user ID: %w", err)
+ }
+
+ resBody := new(container.ListStreamResponseBody)
+ r := new(container.ListStreamResponse)
+ r.SetBody(resBody)
+
+ var cidList []refs.ContainerID
+
+ // Number of containers to send at once.
+ const batchSize = 1000
+
+ processCID := func(id cid.ID) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var refID refs.ContainerID
+ id.WriteToV2(&refID)
+ cidList = append(cidList, refID)
+ if len(cidList) == batchSize {
+ r.GetBody().SetContainerIDs(cidList)
+ cidList = cidList[:0]
+ return stream.Send(r)
+ }
+ return nil
+ }
+
+ if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil {
+ return err
+ }
+
+ if len(cidList) > 0 {
+ r.GetBody().SetContainerIDs(cidList)
+ return stream.Send(r)
+ }
+
+ return nil
+}
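
The streaming path flushes container IDs in fixed batches of 1000 and sends any remainder at the end; an empty result set emits no message (the signer below compensates for that). A standalone sketch of the batching logic:

```go
package main

import "fmt"

// sendBatched mirrors the ListStream flushing logic above: emit fixed-size
// batches while iterating, then flush whatever remains at the end. Empty
// input produces no message at all.
func sendBatched(ids []int, batchSize int, send func([]int) error) error {
	var batch []int
	for _, id := range ids {
		batch = append(batch, id)
		if len(batch) == batchSize {
			if err := send(batch); err != nil {
				return err
			}
			batch = batch[:0] // reuse the buffer for the next batch
		}
	}
	if len(batch) > 0 {
		return send(batch)
	}
	return nil
}

func main() {
	ids := make([]int, 2500)
	_ = sendBatched(ids, 1000, func(b []int) error {
		fmt.Println("sent", len(b)) // sent 1000, sent 1000, sent 500
		return nil
	})
}
```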
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index 87d307385..1f6fdb0be 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -24,11 +24,11 @@ type mock struct {
containerSvcMorph.Reader
}
-func (m mock) Put(_ containerCore.Container) (*cid.ID, error) {
+func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) {
return new(cid.ID), nil
}
-func (m mock) Delete(_ containerCore.RemovalWitness) error {
+func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error {
return nil
}
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index 78fd3d34c..d9208077d 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -3,6 +3,7 @@ package container
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
@@ -12,4 +13,11 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
+ ListStream(*container.ListStreamRequest, ListStream) error
+}
+
+// ListStream is an interface of a FrostFS API v2-compatible container list streamer.
+type ListStream interface {
+ util.ServerStream
+ Send(*container.ListStreamResponse) error
}
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index c478c0e1c..85fe7ae87 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -56,3 +56,40 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
return resp, s.sigSvc.SignResponse(resp, err)
}
+
+func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.ListStreamResponse)
+ _ = s.sigSvc.SignResponse(resp, err)
+ return stream.Send(resp)
+ }
+
+ ss := &listStreamSigner{
+ ListStream: stream,
+ sigSvc: s.sigSvc,
+ }
+ err := s.svc.ListStream(req, ss)
+ if err != nil || !ss.nonEmptyResp {
+ return ss.send(new(container.ListStreamResponse), err)
+ }
+ return nil
+}
+
+type listStreamSigner struct {
+ ListStream
+ sigSvc *util.SignService
+
+ nonEmptyResp bool // set on first Send call
+}
+
+func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error {
+ s.nonEmptyResp = true
+ return s.send(resp, nil)
+}
+
+func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error {
+ if err := s.sigSvc.SignResponse(resp, err); err != nil {
+ return err
+ }
+ return s.ListStream.Send(resp)
+}
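
`listStreamSigner` tracks whether the wrapped handler ever sent a message, so every stream, including failed or empty ones, delivers exactly one signed response the client can verify. A standalone sketch of that guarantee:

```go
package main

import "fmt"

// wrap mirrors listStreamSigner: if the wrapped handler never sent a
// response (an error or an empty result set), emit exactly one final
// signed message so the client always receives a verifiable reply.
func wrap(handler func(send func(string)) error) []string {
	var out []string
	sent := false
	send := func(msg string) {
		sent = true
		out = append(out, "signed:"+msg)
	}

	if err := handler(send); err != nil || !sent {
		out = append(out, "signed:final") // carries the error status, if any
	}
	return out
}

func main() {
	fmt.Println(wrap(func(func(string)) error { return nil }))                 // [signed:final]
	fmt.Println(wrap(func(send func(string)) error { send("a"); return nil })) // [signed:a]
}
```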
diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go
new file mode 100644
index 000000000..4f8708da7
--- /dev/null
+++ b/pkg/services/container/transport_splitter.go
@@ -0,0 +1,92 @@
+package container
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+)
+
+type (
+ TransportSplitter struct {
+ next Server
+
+ respSvc *response.Service
+ cnrAmount uint32
+ }
+
+ listStreamMsgSizeCtrl struct {
+ util.ServerStream
+ stream ListStream
+ respSvc *response.Service
+ cnrAmount uint32
+ }
+)
+
+func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server {
+ return &TransportSplitter{
+ next: next,
+ respSvc: respSvc,
+ cnrAmount: cnrAmount,
+ }
+}
+
+func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
+ return s.next.Put(ctx, req)
+}
+
+func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
+ return s.next.Delete(ctx, req)
+}
+
+func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
+ return s.next.Get(ctx, req)
+}
+
+func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
+ return s.next.List(ctx, req)
+}
+
+func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ return s.next.ListStream(req, &listStreamMsgSizeCtrl{
+ ServerStream: stream,
+ stream: stream,
+ respSvc: s.respSvc,
+ cnrAmount: s.cnrAmount,
+ })
+}
+
+func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error {
+ s.respSvc.SetMeta(resp)
+ body := resp.GetBody()
+ ids := body.GetContainerIDs()
+
+ var newResp *container.ListStreamResponse
+
+ for {
+ if newResp == nil {
+ newResp = new(container.ListStreamResponse)
+ newResp.SetBody(body)
+ }
+
+ cut := min(s.cnrAmount, uint32(len(ids)))
+
+ body.SetContainerIDs(ids[:cut])
+ newResp.SetMetaHeader(resp.GetMetaHeader())
+ newResp.SetVerificationHeader(resp.GetVerificationHeader())
+
+ if err := s.stream.Send(newResp); err != nil {
+ return fmt.Errorf("TransportSplitter: %w", err)
+ }
+
+ ids = ids[cut:]
+
+ if len(ids) == 0 {
+ break
+ }
+ }
+
+ return nil
+}
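
The splitter re-slices each outgoing message so no single gRPC frame carries more than `cnrAmount` IDs; with the default batch of 1000, a 2500-ID response becomes three messages. A standalone sketch of the chunking loop:

```go
package main

import "fmt"

// split mirrors listStreamMsgSizeCtrl.Send: re-slice the ID list so no
// single message carries more than max entries. Like the original loop,
// it sends at least one (possibly empty) message.
func split(ids []string, max int, send func([]string) error) error {
	for {
		cut := min(max, len(ids))
		if err := send(ids[:cut]); err != nil {
			return fmt.Errorf("TransportSplitter: %w", err)
		}
		ids = ids[cut:]
		if len(ids) == 0 {
			return nil
		}
	}
}

func main() {
	ids := make([]string, 7)
	_ = split(ids, 3, func(chunk []string) error {
		fmt.Println(len(chunk)) // 3, 3, 1
		return nil
	})
}
```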
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
index e54fa9824..d9f65a2fc 100644
--- a/pkg/services/control/ir/server/audit.go
+++ b/pkg/services/control/ir/server/audit.go
@@ -36,7 +36,7 @@ func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheck
if !a.enabled.Load() {
return res, err
}
- audit.LogRequestWithKey(a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
return res, err
}
@@ -79,7 +79,7 @@ func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveC
}
}
- audit.LogRequestWithKey(a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
return res, err
}
@@ -90,7 +90,7 @@ func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRe
return res, err
}
- audit.LogRequestWithKey(a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
return res, err
}
@@ -102,7 +102,7 @@ func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequ
return res, err
}
- audit.LogRequestWithKey(a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
nil, err == nil)
return res, err
}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 63be22411..0509d2646 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -40,7 +40,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
// TickEpoch forces a new epoch.
//
// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -48,12 +48,12 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c
resp := new(control.TickEpochResponse)
resp.SetBody(new(control.TickEpochResponse_Body))
- epoch, err := s.netmapClient.Epoch()
+ epoch, err := s.netmapClient.Epoch(ctx)
if err != nil {
return nil, fmt.Errorf("getting current epoch: %w", err)
}
- vub, err := s.netmapClient.NewEpochControl(epoch+1, req.GetBody().GetVub())
+ vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing new epoch: %w", err)
}
@@ -69,7 +69,7 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c
// RemoveNode forces a node removal.
//
// If request is not signed with a key from white list, permission error returns.
-func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -77,7 +77,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
resp := new(control.RemoveNodeResponse)
resp.SetBody(new(control.RemoveNodeResponse_Body))
- nm, err := s.netmapClient.NetMap()
+ nm, err := s.netmapClient.NetMap(ctx)
if err != nil {
return nil, fmt.Errorf("getting netmap: %w", err)
}
@@ -95,7 +95,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
return nil, status.Error(codes.FailedPrecondition, "node is already offline")
}
- vub, err := s.netmapClient.ForceRemovePeer(nodeInfo, req.GetBody().GetVub())
+ vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing node removal: %w", err)
}
@@ -109,7 +109,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
}
// RemoveContainer forces a container removal.
-func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
+func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -124,7 +124,7 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
}
var err error
- vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -138,13 +138,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
}
- cids, err := s.containerClient.ContainersOf(&owner)
+ cids, err := s.containerClient.ContainersOf(ctx, &owner)
if err != nil {
return nil, fmt.Errorf("failed to get owner's containers: %w", err)
}
for _, containerID := range cids {
- vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -162,13 +162,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
return resp, nil
}
-func (s *Server) removeContainer(containerID cid.ID, vub uint32) (uint32, error) {
+func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
var prm container.DeletePrm
prm.SetCID(containerID[:])
prm.SetControlTX(true)
prm.SetVUB(vub)
- vub, err := s.containerClient.Delete(prm)
+ vub, err := s.containerClient.Delete(ctx, prm)
if err != nil {
return 0, fmt.Errorf("forcing container removal: %w", err)
}
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
index c2a4f88a6..0cfca71c1 100644
--- a/pkg/services/control/ir/server/server.go
+++ b/pkg/services/control/ir/server/server.go
@@ -35,8 +35,7 @@ func panicOnPrmValue(n string, v any) {
// the parameterized private key.
func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server {
// verify required parameters
- switch {
- case prm.healthChecker == nil:
+ if prm.healthChecker == nil {
panicOnPrmValue("health checker", prm.healthChecker)
}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 514061db4..0c4236d0e 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -1,6 +1,8 @@
package control
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
)
@@ -15,7 +17,6 @@ const (
rpcListShards = "ListShards"
rpcSetShardMode = "SetShardMode"
rpcSynchronizeTree = "SynchronizeTree"
- rpcEvacuateShard = "EvacuateShard"
rpcStartShardEvacuation = "StartShardEvacuation"
rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus"
@@ -31,6 +32,7 @@ const (
rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
rpcDetachShards = "DetachShards"
rpcStartShardRebuild = "StartShardRebuild"
+ rpcListShardsForObject = "ListShardsForObject"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -74,6 +76,7 @@ func SetNetmapStatus(
// GetNetmapStatus executes ControlService.GetNetmapStatus RPC.
func GetNetmapStatus(
+ _ context.Context,
cli *client.Client,
req *GetNetmapStatusRequest,
opts ...client.CallOption,
@@ -162,19 +165,6 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl
return wResp.message, nil
}
-// EvacuateShard executes ControlService.EvacuateShard RPC.
-func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) {
- wResp := newResponseWrapper[EvacuateShardResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
// StartShardEvacuation executes ControlService.StartShardEvacuation RPC.
func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) {
wResp := newResponseWrapper[StartShardEvacuationResponse]()
@@ -375,3 +365,22 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts .
return wResp.message, nil
}
+
+// ListShardsForObject executes ControlService.ListShardsForObject RPC.
+func ListShardsForObject(
+ cli *client.Client,
+ req *ListShardsForObjectRequest,
+ opts ...client.CallOption,
+) (*ListShardsForObjectResponse, error) {
+ wResp := newResponseWrapper[ListShardsForObjectResponse]()
+
+ wReq := &requestWrapper{
+ m: req,
+ }
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
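A hypothetical caller of the new wrapper, assuming an already-dialed *client.Client; connection setup and request signing (required by the server-side isValidRequest check) are omitted here:

```go
package example

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
)

// listShards asks the node which of its shards hold the given object.
func listShards(cli *client.Client, objID, cnrID string) error {
	body := new(control.ListShardsForObjectRequest_Body)
	body.SetObjectId(objID)
	body.SetContainerId(cnrID)

	req := new(control.ListShardsForObjectRequest)
	req.SetBody(body)

	resp, err := control.ListShardsForObject(cli, req)
	if err != nil {
		return err
	}
	for _, id := range resp.GetBody().GetShard_ID() {
		fmt.Printf("shard: %x\n", id) // raw shard ID bytes
	}
	return nil
}
```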
diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go
index a4111bddb..ffd36962b 100644
--- a/pkg/services/control/server/detach_shards.go
+++ b/pkg/services/control/server/detach_shards.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
+func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -19,7 +19,7 @@ func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsReques
shardIDs := s.getShardIDList(req.GetBody().GetShard_ID())
- if err := s.s.DetachShards(shardIDs); err != nil {
+ if err := s.s.DetachShards(ctx, shardIDs); err != nil {
if errors.As(err, new(logicerr.Logical)) {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
deleted file mode 100644
index ae3413373..000000000
--- a/pkg/services/control/server/evacuate.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package control
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/hex"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
-
-func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- prm := engine.EvacuateShardPrm{
- ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- ObjectsHandler: s.replicateObject,
- Scope: engine.EvacuateScopeObjects,
- }
-
- res, err := s.s.Evacuate(ctx, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := &control.EvacuateShardResponse{
- Body: &control.EvacuateShardResponse_Body{
- Count: uint32(res.ObjectsEvacuated()),
- },
- }
-
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- cid, ok := obj.ContainerID()
- if !ok {
- // Return nil to prevent situations where a shard can't be evacuated
- // because of a single bad/corrupted object.
- return false, nil
- }
-
- nodes, err := s.getContainerNodes(cid)
- if err != nil {
- return false, err
- }
-
- if len(nodes) == 0 {
- return false, nil
- }
-
- var res replicatorResult
- task := replicator.Task{
- NumCopies: 1,
- Addr: addr,
- Obj: obj,
- Nodes: nodes,
- }
- s.replicator.HandleReplicationTask(ctx, task, &res)
-
- if res.count == 0 {
- return false, errors.New("object was not replicated")
- }
- return true, nil
-}
-
-func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
- nodes, err := s.getContainerNodes(contID)
- if err != nil {
- return false, "", err
- }
- if len(nodes) == 0 {
- return false, "", nil
- }
-
- for _, node := range nodes {
- err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
- if err == nil {
- return true, hex.EncodeToString(node.PublicKey()), nil
- }
- }
- return false, "", err
-}
-
-func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
- rawCID := make([]byte, sha256.Size)
- contID.Encode(rawCID)
-
- var height uint64
- for {
- op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
- if err != nil {
- return err
- }
-
- if op.Time == 0 {
- return nil
- }
-
- req := &tree.ApplyRequest{
- Body: &tree.ApplyRequest_Body{
- ContainerId: rawCID,
- TreeId: treeID,
- Operation: &tree.LogMove{
- ParentId: op.Parent,
- Meta: op.Meta.Bytes(),
- ChildId: op.Child,
- },
- },
- }
-
- err = tree.SignMessage(req, s.key)
- if err != nil {
- return fmt.Errorf("can't message apply request: %w", err)
- }
-
- err = s.treeService.ReplicateTreeOp(ctx, node, req)
- if err != nil {
- return err
- }
-
- height = op.Time + 1
- }
-}
-
-func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
- nm, err := s.netMapSrc.GetNetMap(0)
- if err != nil {
- return nil, err
- }
-
- c, err := s.cnrSrc.Get(contID)
- if err != nil {
- return nil, err
- }
-
- binCnr := make([]byte, sha256.Size)
- contID.Encode(binCnr)
-
- ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
- if err != nil {
- return nil, errFailedToBuildListOfContainerNodes
- }
-
- nodes := placement.FlattenNodes(ns)
- bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
- if bytes.Equal(nodes[i].PublicKey(), bs) {
- copy(nodes[i:], nodes[i+1:])
- nodes = nodes[:len(nodes)-1]
- }
- }
- return nodes, nil
-}
-
-type replicatorResult struct {
- count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
- r.count++
-}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index 146ac7e16..f3ba9015e 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -1,17 +1,32 @@
package control
import (
+ "bytes"
"context"
+ "crypto/sha256"
+ "encoding/hex"
"errors"
+ "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
+var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
+
func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) {
err := s.isValidRequest(req)
if err != nil {
@@ -27,15 +42,13 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
IgnoreErrors: req.GetBody().GetIgnoreErrors(),
ObjectsHandler: s.replicateObject,
TreeHandler: s.replicateTree,
- Async: true,
Scope: engine.EvacuateScope(req.GetBody().GetScope()),
ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
RepOneOnly: req.GetBody().GetRepOneOnly(),
}
- _, err = s.s.Evacuate(ctx, prm)
- if err != nil {
+ if err = s.s.Evacuate(ctx, prm); err != nil {
var logicalErr logicerr.Logical
if errors.As(err, &logicalErr) {
return nil, status.Error(codes.Aborted, err.Error())
@@ -135,3 +148,133 @@ func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.Re
}
return resp, nil
}
+
+func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ cid, ok := obj.ContainerID()
+ if !ok {
+ // Return nil to prevent situations where a shard can't be evacuated
+ // because of a single bad/corrupted object.
+ return false, nil
+ }
+
+ nodes, err := s.getContainerNodes(ctx, cid)
+ if err != nil {
+ return false, err
+ }
+
+ if len(nodes) == 0 {
+ return false, nil
+ }
+
+ var res replicatorResult
+ task := replicator.Task{
+ NumCopies: 1,
+ Addr: addr,
+ Obj: obj,
+ Nodes: nodes,
+ }
+ s.replicator.HandleReplicationTask(ctx, task, &res)
+
+ if res.count == 0 {
+ return false, errors.New("object was not replicated")
+ }
+ return true, nil
+}
+
+func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
+ nodes, err := s.getContainerNodes(ctx, contID)
+ if err != nil {
+ return false, "", err
+ }
+ if len(nodes) == 0 {
+ return false, "", nil
+ }
+
+ for _, node := range nodes {
+ err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
+ if err == nil {
+ return true, hex.EncodeToString(node.PublicKey()), nil
+ }
+ }
+ return false, "", err
+}
+
+func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
+ rawCID := make([]byte, sha256.Size)
+ contID.Encode(rawCID)
+
+ var height uint64
+ for {
+ op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
+ if err != nil {
+ return err
+ }
+
+ if op.Time == 0 {
+ return nil
+ }
+
+ req := &tree.ApplyRequest{
+ Body: &tree.ApplyRequest_Body{
+ ContainerId: rawCID,
+ TreeId: treeID,
+ Operation: &tree.LogMove{
+ ParentId: op.Parent,
+ Meta: op.Meta.Bytes(),
+ ChildId: op.Child,
+ },
+ },
+ }
+
+ err = tree.SignMessage(req, s.key)
+ if err != nil {
+ return fmt.Errorf("can't message apply request: %w", err)
+ }
+
+ err = s.treeService.ReplicateTreeOp(ctx, node, req)
+ if err != nil {
+ return err
+ }
+
+ height = op.Time + 1
+ }
+}
+
+func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) {
+ nm, err := s.netMapSrc.GetNetMap(ctx, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := s.cnrSrc.Get(ctx, contID)
+ if err != nil {
+ return nil, err
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ contID.Encode(binCnr)
+
+ ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
+ if err != nil {
+ return nil, errFailedToBuildListOfContainerNodes
+ }
+
+ nodes := placement.FlattenNodes(ns)
+ bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
+ for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
+ if bytes.Equal(nodes[i].PublicKey(), bs) {
+ copy(nodes[i:], nodes[i+1:])
+ nodes = nodes[:len(nodes)-1]
+ }
+ }
+ return nodes, nil
+}
+
+type replicatorResult struct {
+ count int
+}
+
+// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
+ r.count++
+}
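One detail worth calling out in the relocated helpers: getContainerNodes filters the local node out of the candidate list in place, deliberately avoiding range because the slice shrinks inside the loop body. A standalone sketch of that idiom (entries are assumed unique, as netmap public keys are):

```go
package main

import (
	"bytes"
	"fmt"
)

// exclude removes every entry equal to self, shifting the tail left and
// shrinking the slice, exactly as the loop in getContainerNodes does.
func exclude(nodes [][]byte, self []byte) [][]byte {
	for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
		if bytes.Equal(nodes[i], self) {
			copy(nodes[i:], nodes[i+1:])
			nodes = nodes[:len(nodes)-1]
		}
	}
	return nodes
}

func main() {
	nodes := [][]byte{[]byte("a"), []byte("self"), []byte("b")}
	fmt.Printf("%s\n", exclude(nodes, []byte("self"))) // [a b]
}
```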
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index d9fefc38e..a8ef7809e 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -42,8 +42,7 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
prm.WithForceRemoval()
prm.WithAddress(addrList[i])
- _, err := s.s.Delete(ctx, prm)
- if err != nil && firstErr == nil {
+ if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
firstErr = err
}
}
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
index 1c038253a..5e0496910 100644
--- a/pkg/services/control/server/get_netmap_status.go
+++ b/pkg/services/control/server/get_netmap_status.go
@@ -10,12 +10,12 @@ import (
)
// GetNetmapStatus gets node status in FrostFS network.
-func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
+func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
- st, epoch, err := s.nodeState.GetNetmapStatus()
+ st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go
new file mode 100644
index 000000000..39565ed50
--- /dev/null
+++ b/pkg/services/control/server/list_shards_for_object.go
@@ -0,0 +1,65 @@
+package control
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ var obj oid.ID
+ err = obj.DecodeString(req.GetBody().GetObjectId())
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ var cnr cid.ID
+ err = cnr.DecodeString(req.GetBody().GetContainerId())
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ resp := new(control.ListShardsForObjectResponse)
+ body := new(control.ListShardsForObjectResponse_Body)
+ resp.SetBody(body)
+
+ var objAddr oid.Address
+ objAddr.SetContainer(cnr)
+ objAddr.SetObject(obj)
+ info, err := s.s.ListShardsForObject(ctx, objAddr)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ if len(info) == 0 {
+ return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject)
+ }
+
+ body.SetShard_ID(shardInfoToProto(info))
+
+ // Sign the response
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func shardInfoToProto(infos []shard.Info) [][]byte {
+ shardInfos := make([][]byte, 0, len(infos))
+ for _, info := range infos {
+ shardInfos = append(shardInfos, *info.ID)
+ }
+
+ return shardInfos
+}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index b6fdcb246..59d701bc6 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -1,6 +1,7 @@
package control
import (
+ "context"
"crypto/ecdsa"
"sync/atomic"
@@ -45,13 +46,13 @@ type NodeState interface {
//
// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
// in the network settings, the node additionally starts local maintenance.
- SetNetmapStatus(st control.NetmapStatus) error
+ SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
// but starts local maintenance regardless of the network settings.
- ForceMaintenance() error
+ ForceMaintenance(ctx context.Context) error
- GetNetmapStatus() (control.NetmapStatus, uint64, error)
+ GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
}
// LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
index 3fd69df12..529041dca 100644
--- a/pkg/services/control/server/set_netmap_status.go
+++ b/pkg/services/control/server/set_netmap_status.go
@@ -12,7 +12,7 @@ import (
// SetNetmapStatus sets node status in FrostFS network.
//
// If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
+func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
// verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -29,9 +29,9 @@ func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatus
"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
}
- err = s.nodeState.ForceMaintenance()
+ err = s.nodeState.ForceMaintenance(ctx)
} else {
- err = s.nodeState.SetNetmapStatus(st)
+ err = s.nodeState.SetNetmapStatus(ctx, st)
}
if err != nil {
diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go
index 52835c41d..4f8796263 100644
--- a/pkg/services/control/server/set_shard_mode.go
+++ b/pkg/services/control/server/set_shard_mode.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
+func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
// verify request
err := s.isValidRequest(req)
if err != nil {
@@ -38,7 +38,7 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques
}
for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
- err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter())
+ err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index ae1939e13..4c539acfc 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -30,11 +30,6 @@ service ControlService {
// Synchronizes all log operations for the specified tree.
rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
- // EvacuateShard moves all data from one shard to the others.
- // Deprecated: Use
- // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
- rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse);
-
// StartShardEvacuation starts moving all data from one shard to the others.
rpc StartShardEvacuation(StartShardEvacuationRequest)
returns (StartShardEvacuationResponse);
@@ -94,6 +89,9 @@ service ControlService {
// StartShardRebuild starts shard rebuild process.
rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
+
+ // ListShardsForObject returns information about the shards storing the object.
+ rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
}
// Health check request.
@@ -734,3 +732,23 @@ message StartShardRebuildResponse {
Signature signature = 2;
}
+
+message ListShardsForObjectRequest {
+ message Body {
+ string object_id = 1;
+ string container_id = 2;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message ListShardsForObjectResponse {
+ message Body {
+ // List of the node's shards storing the object.
+ repeated bytes shard_ID = 1;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 0b4e3cf32..44849d591 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -17303,3 +17303,727 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
in.Consumed()
}
}
+
+type ListShardsForObjectRequest_Body struct {
+ ObjectId string `json:"objectId"`
+ ContainerId string `json:"containerId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.StringSize(1, x.ObjectId)
+ size += proto.StringSize(2, x.ContainerId)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ObjectId) != 0 {
+ mm.AppendString(1, x.ObjectId)
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendString(2, x.ContainerId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ObjectId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ObjectId")
+ }
+ x.ObjectId = data
+ case 2: // ContainerId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest_Body) GetObjectId() string {
+ if x != nil {
+ return x.ObjectId
+ }
+ return ""
+}
+func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) {
+ x.ObjectId = v
+}
+func (x *ListShardsForObjectRequest_Body) GetContainerId() string {
+ if x != nil {
+ return x.ContainerId
+ }
+ return ""
+}
+func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) {
+ x.ContainerId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"objectId\":"
+ out.RawString(prefix)
+ out.String(x.ObjectId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ out.String(x.ContainerId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "objectId":
+ {
+ var f string
+ f = in.String()
+ x.ObjectId = f
+ }
+ case "containerId":
+ {
+ var f string
+ f = in.String()
+ x.ContainerId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsForObjectRequest struct {
+ Body *ListShardsForObjectRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil)
+ _ json.Marshaler = (*ListShardsForObjectRequest)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListShardsForObjectRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsForObjectRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) {
+ x.Body = v
+}
+func (x *ListShardsForObjectRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsForObjectRequest_Body
+ f = new(ListShardsForObjectRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsForObjectResponse_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsForObjectResponse struct {
+ Body *ListShardsForObjectResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
+ _ json.Marshaler = (*ListShardsForObjectResponse)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListShardsForObjectResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsForObjectResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) {
+ x.Body = v
+}
+func (x *ListShardsForObjectResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsForObjectResponse_Body
+ f = new(ListShardsForObjectResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
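A round-trip sketch for the generated JSON marshalers above, using made-up shard ID bytes: shard IDs serialize as base64 strings under the "shardID" key, and a nil signature renders as null:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.ListShardsForObjectResponse_Body)
	body.SetShard_ID([][]byte{{0x01, 0x02}})

	resp := new(control.ListShardsForObjectResponse)
	resp.SetBody(body)

	data, err := resp.MarshalJSON()
	if err != nil {
		panic(err)
	}
	// Prints roughly: {"body":{"shardID":["AQI="]},"signature":null}
	fmt.Println(string(data))
}
```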
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index f5cfefa85..045662ccf 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -26,7 +26,6 @@ const (
ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
- ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation"
ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus"
ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus"
@@ -42,6 +41,7 @@ const (
ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
+ ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject"
)
// ControlServiceClient is the client API for ControlService service.
@@ -62,10 +62,6 @@ type ControlServiceClient interface {
SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error)
- // EvacuateShard moves all data from one shard to the others.
- // Deprecated: Use
- // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
- EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -100,6 +96,8 @@ type ControlServiceClient interface {
DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
+ // ListShardsForObject returns information about the shards storing the object.
+ ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error)
}
type controlServiceClient struct {
@@ -173,15 +171,6 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron
return out, nil
}
-func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
- out := new(EvacuateShardResponse)
- err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) {
out := new(StartShardEvacuationResponse)
err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...)
@@ -317,6 +306,15 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS
return out, nil
}
+func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) {
+ out := new(ListShardsForObjectResponse)
+ err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// ControlServiceServer is the server API for ControlService service.
// All implementations should embed UnimplementedControlServiceServer
// for forward compatibility
@@ -335,10 +333,6 @@ type ControlServiceServer interface {
SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error)
- // EvacuateShard moves all data from one shard to the others.
- // Deprecated: Use
- // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
- EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -373,6 +367,8 @@ type ControlServiceServer interface {
DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
+ // ListShardsForObject returns information about the shards storing the object.
+ ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -400,9 +396,6 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard
func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
}
-func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented")
-}
func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented")
}
@@ -448,6 +441,9 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh
func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
}
+func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented")
+}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -586,24 +582,6 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
-func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(EvacuateShardRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).EvacuateShard(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_EvacuateShard_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartShardEvacuationRequest)
if err := dec(in); err != nil {
@@ -874,6 +852,24 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
+func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListShardsForObjectRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).ListShardsForObject(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_ListShardsForObject_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -909,10 +905,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SynchronizeTree",
Handler: _ControlService_SynchronizeTree_Handler,
},
- {
- MethodName: "EvacuateShard",
- Handler: _ControlService_EvacuateShard_Handler,
- },
{
MethodName: "StartShardEvacuation",
Handler: _ControlService_StartShardEvacuation_Handler,
@@ -973,6 +965,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "StartShardRebuild",
Handler: _ControlService_StartShardRebuild_Handler,
},
+ {
+ MethodName: "ListShardsForObject",
+ Handler: _ControlService_ListShardsForObject_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
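A sketch of wiring a server through the generated ServiceDesc, as its doc comment above suggests; embedding UnimplementedControlServiceServer keeps the implementation forward compatible, with every method left unimplemented answering codes.Unimplemented:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
	"google.golang.org/grpc"
)

type listOnlyServer struct {
	control.UnimplementedControlServiceServer
}

// ListShardsForObject is the only method implemented here; the real shard
// lookup, request validation and response signing are omitted.
func (s *listOnlyServer) ListShardsForObject(_ context.Context, _ *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
	return new(control.ListShardsForObjectResponse), nil
}

func main() {
	srv := grpc.NewServer()
	srv.RegisterService(&control.ControlService_ServiceDesc, &listOnlyServer{})
	// srv.Serve(lis) on a net.Listener would go here.
}
```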
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 5223047df..1b92fdaad 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
@@ -42,14 +43,16 @@ type NetworkInfo interface {
// Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
- Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+ Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
}
func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
- if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil {
- // this should never happen, otherwise it programmers bug
- panic("can't create netmap execution service")
- }
+ // this should never happen, otherwise it's a programmer's bug
+ msg := "BUG: can't create netmap execution service"
+ assert.False(s == nil, msg, "node state is nil")
+ assert.False(netInfo == nil, msg, "network info is nil")
+ assert.False(respSvc == nil, msg, "response service is nil")
+ assert.True(version.IsValid(v), msg, "invalid version")
res := &executorSvc{
state: s,
@@ -82,7 +85,7 @@ func (s *executorSvc) LocalNodeInfo(
}
func (s *executorSvc) NetworkInfo(
- _ context.Context,
+ ctx context.Context,
req *netmap.NetworkInfoRequest,
) (*netmap.NetworkInfoResponse, error) {
verV2 := req.GetMetaHeader().GetVersion()
@@ -95,7 +98,7 @@ func (s *executorSvc) NetworkInfo(
return nil, fmt.Errorf("can't read version: %w", err)
}
- ni, err := s.netInfo.Dump(ver)
+ ni, err := s.netInfo.Dump(ctx, ver)
if err != nil {
return nil, err
}
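
Aside (not part of the patch): internal/assert itself is not shown in this diff, so the exact helper signatures are assumed; the guard style that replaces the single panic above boils down to something like:

package main

import "strings"

// assertTrue panics with a joined message when an invariant is violated.
// The real helpers live in internal/assert; signatures here are assumed.
func assertTrue(cond bool, details ...string) {
    if !cond {
        panic(strings.Join(details, ": "))
    }
}

func assertFalse(cond bool, details ...string) {
    assertTrue(!cond, details...)
}

func main() {
    var netInfo any = "non-nil dependency" // stand-in for the NetworkInfo parameter
    msg := "BUG: can't create netmap execution service"

    assertFalse(netInfo == nil, msg, "network info is nil") // passes: no panic
}
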
diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go
deleted file mode 100644
index 921545c8b..000000000
--- a/pkg/services/object/acl/acl.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package acl
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "errors"
- "fmt"
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2"
- v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
- bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// Checker implements the v2.ACLChecker interface and provides
-// ACL/eACL validation functionality.
-type Checker struct {
- eaclSrc container.EACLSource
- validator *eaclSDK.Validator
- localStorage *engine.StorageEngine
- state netmap.State
-}
-
-type localStorage struct {
- ls *engine.StorageEngine
-}
-
-func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
- if s.ls == nil {
- return nil, io.ErrUnexpectedEOF
- }
-
- return engine.Head(ctx, s.ls, addr)
-}
-
-// Various EACL check errors.
-var (
- errEACLDeniedByRule = errors.New("denied by rule")
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
-
-// NewChecker creates Checker.
-// Panics if at least one of the parameters is nil.
-func NewChecker(
- state netmap.State,
- eaclSrc container.EACLSource,
- validator *eaclSDK.Validator,
- localStorage *engine.StorageEngine,
-) *Checker {
- return &Checker{
- eaclSrc: eaclSrc,
- validator: validator,
- localStorage: localStorage,
- state: state,
- }
-}
-
-// CheckBasicACL is a main check function for basic ACL.
-func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool {
- // check basic ACL permissions
- return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole())
-}
-
-// StickyBitCheck validates owner field in the request if sticky bit is enabled.
-func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool {
- // According to the FrostFS specification, the sticky bit has no effect on
- // system nodes, so intra-container work with objects (in particular,
- // replication) keeps functioning correctly.
- if info.RequestRole() == acl.RoleContainer {
- return true
- }
-
- if !info.BasicACL().Sticky() {
- return true
- }
-
- if len(info.SenderKey()) == 0 {
- return false
- }
-
- requestSenderKey := unmarshalPublicKey(info.SenderKey())
-
- return isOwnerFromKey(owner, requestSenderKey)
-}
-
-// CheckEACL is a main check function for extended ACL.
-func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
- basicACL := reqInfo.BasicACL()
- if !basicACL.Extendable() {
- return nil
- }
-
- bearerTok := reqInfo.Bearer()
- impersonate := bearerTok != nil && bearerTok.Impersonate()
-
- // if bearer token is not allowed, then ignore it
- if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) {
- reqInfo.CleanBearer()
- }
-
- var table eaclSDK.Table
- cnr := reqInfo.ContainerID()
-
- if bearerTok == nil {
- eaclInfo, err := c.eaclSrc.GetEACL(cnr)
- if err != nil {
- if client.IsErrEACLNotFound(err) {
- return nil
- }
- return err
- }
-
- table = *eaclInfo.Value
- } else {
- table = bearerTok.EACLTable()
- }
-
- // if bearer token is not present, isValidBearer returns nil
- if err := isValidBearer(reqInfo, c.state); err != nil {
- return err
- }
-
- hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo)
- if err != nil {
- return err
- }
-
- eaclRole := getRole(reqInfo)
-
- action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
- WithRole(eaclRole).
- WithOperation(eaclSDK.Operation(reqInfo.Operation())).
- WithContainerID(&cnr).
- WithSenderKey(reqInfo.SenderKey()).
- WithHeaderSource(hdrSrc).
- WithEACLTable(&table),
- )
-
- if action != eaclSDK.ActionAllow {
- return errEACLDeniedByRule
- }
- return nil
-}
-
-func getRole(reqInfo v2.RequestInfo) eaclSDK.Role {
- var eaclRole eaclSDK.Role
- switch op := reqInfo.RequestRole(); op {
- default:
- eaclRole = eaclSDK.Role(op)
- case acl.RoleOwner:
- eaclRole = eaclSDK.RoleUser
- case acl.RoleInnerRing, acl.RoleContainer:
- eaclRole = eaclSDK.RoleSystem
- case acl.RoleOthers:
- eaclRole = eaclSDK.RoleOthers
- }
- return eaclRole
-}
-
-func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) {
- var xHeaderSource eaclV2.XHeaderSource
- if req, ok := msg.(eaclV2.Request); ok {
- xHeaderSource = eaclV2.NewRequestXHeaderSource(req)
- } else {
- xHeaderSource = eaclV2.NewResponseXHeaderSource(msg.(eaclV2.Response), reqInfo.Request().(eaclV2.Request))
- }
-
- hdrSrc, err := eaclV2.NewMessageHeaderSource(&localStorage{ls: c.localStorage}, xHeaderSource, cnr, eaclV2.WithOID(reqInfo.ObjectID()))
- if err != nil {
- return nil, fmt.Errorf("can't parse headers: %w", err)
- }
- return hdrSrc, nil
-}
-
-// isValidBearer checks whether the bearer token was correctly signed by an
-// authorized entity. This method is defined on the whole ACL service because
-// it requires fetching the current epoch to check the token lifetime.
-func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error {
- ownerCnr := reqInfo.ContainerOwner()
-
- token := reqInfo.Bearer()
-
- // 0. Check if bearer token is present in reqInfo.
- if token == nil {
- return nil
- }
-
- // 1. First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // 2. Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // 3. Then check if container is either empty or equal to the container in the request.
- cnr, isSet := token.EACLTable().CID()
- if isSet && !cnr.Equals(reqInfo.ContainerID()) {
- return errBearerInvalidContainerID
- }
-
- // 4. Then check if container owner signed this token.
- if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) {
- // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
- return errBearerNotSignedByOwner
- }
-
- // 5. Then check if request sender has rights to use this token.
- var keySender frostfsecdsa.PublicKey
-
- err := keySender.Decode(reqInfo.SenderKey())
- if err != nil {
- return fmt.Errorf("decode sender public key: %w", err)
- }
-
- var usrSender user.ID
- user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender))
-
- if !token.AssertUser(usrSender) {
- // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
- return errBearerInvalidOwner
- }
-
- return nil
-}
-
-func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
- if key == nil {
- return false
- }
-
- var id2 user.ID
- user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
-
- return id.Equals(id2)
-}
-
-func unmarshalPublicKey(bs []byte) *keys.PublicKey {
- pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256())
- if err != nil {
- return nil
- }
- return pub
-}
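
Aside (not part of the patch): the removed Checker leaned on the SDK's acl.Basic bit field for all basic checks. A small sketch of the calls it made (methods taken from the deleted code above):

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
)

func main() {
    var basic acl.Basic // zero value; real containers carry a concrete ACL

    basic.MakeSticky() // the bit StickyBitCheck consulted via Sticky()

    fmt.Println("sticky:", basic.Sticky())
    fmt.Println("others may GET:", basic.IsOpAllowed(acl.OpObjectGet, acl.RoleOthers))
    fmt.Println("eACL applies:", basic.Extendable())
}
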
diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go
deleted file mode 100644
index d63cb1285..000000000
--- a/pkg/services/object/acl/acl_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package acl
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "github.com/stretchr/testify/require"
-)
-
-type emptyEACLSource struct{}
-
-func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) {
- return nil, nil
-}
-
-type emptyNetmapState struct{}
-
-func (e emptyNetmapState) CurrentEpoch() uint64 {
- return 0
-}
-
-func TestStickyCheck(t *testing.T) {
- checker := NewChecker(
- emptyNetmapState{},
- emptyEACLSource{},
- eaclSDK.NewValidator(),
- &engine.StorageEngine{})
-
- t.Run("system role", func(t *testing.T) {
- var info v2.RequestInfo
-
- info.SetSenderKey(make([]byte, 33)) // any non-empty key
- info.SetRequestRole(acl.RoleContainer)
-
- require.True(t, checker.StickyBitCheck(info, usertest.ID()))
-
- var basicACL acl.Basic
- basicACL.MakeSticky()
-
- info.SetBasicACL(basicACL)
-
- require.True(t, checker.StickyBitCheck(info, usertest.ID()))
- })
-
- t.Run("owner ID and/or public key emptiness", func(t *testing.T) {
- var info v2.RequestInfo
-
- info.SetRequestRole(acl.RoleOthers) // should be non-system role
-
- assertFn := func(isSticky, withKey, withOwner, expected bool) {
- info := info
- if isSticky {
- var basicACL acl.Basic
- basicACL.MakeSticky()
-
- info.SetBasicACL(basicACL)
- }
-
- if withKey {
- info.SetSenderKey(make([]byte, 33))
- } else {
- info.SetSenderKey(nil)
- }
-
- var ownerID user.ID
-
- if withOwner {
- ownerID = usertest.ID()
- }
-
- require.Equal(t, expected, checker.StickyBitCheck(info, ownerID))
- }
-
- assertFn(true, false, false, false)
- assertFn(true, true, false, false)
- assertFn(true, false, true, false)
- assertFn(false, false, false, true)
- assertFn(false, true, false, true)
- assertFn(false, false, true, true)
- assertFn(false, true, true, true)
- })
-}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
deleted file mode 100644
index 94e015abe..000000000
--- a/pkg/services/object/acl/eacl/v2/eacl_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package v2
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
- "testing"
-
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-type testLocalStorage struct {
- t *testing.T
-
- expAddr oid.Address
-
- obj *objectSDK.Object
-
- err error
-}
-
-func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
- require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
- require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
-
- return s.obj, s.err
-}
-
-func testXHeaders(strs ...string) []session.XHeader {
- res := make([]session.XHeader, len(strs)/2)
-
- for i := 0; i < len(strs); i += 2 {
- res[i/2].SetKey(strs[i])
- res[i/2].SetValue(strs[i+1])
- }
-
- return res
-}
-
-func TestHeadRequest(t *testing.T) {
- req := new(objectV2.HeadRequest)
-
- meta := new(session.RequestMetaHeader)
- req.SetMetaHeader(meta)
-
- body := new(objectV2.HeadRequestBody)
- req.SetBody(body)
-
- addr := oidtest.Address()
-
- var addrV2 refs.Address
- addr.WriteToV2(&addrV2)
-
- body.SetAddress(&addrV2)
-
- xKey := "x-key"
- xVal := "x-val"
- xHdrs := testXHeaders(
- xKey, xVal,
- )
-
- meta.SetXHeaders(xHdrs)
-
- obj := objectSDK.New()
-
- attrKey := "attr_key"
- attrVal := "attr_val"
- var attr objectSDK.Attribute
- attr.SetKey(attrKey)
- attr.SetValue(attrVal)
- obj.SetAttributes(attr)
-
- table := new(eaclSDK.Table)
-
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
- senderKey := priv.PublicKey()
-
- r := eaclSDK.NewRecord()
- r.SetOperation(eaclSDK.OperationHead)
- r.SetAction(eaclSDK.ActionDeny)
- r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
- r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
- eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
-
- table.AddRecord(r)
-
- lStorage := &testLocalStorage{
- t: t,
- expAddr: addr,
- obj: obj,
- }
-
- id := addr.Object()
-
- newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
- hdrSrc, err := NewMessageHeaderSource(
- lStorage,
- NewRequestXHeaderSource(req),
- addr.Container(),
- WithOID(&id))
- require.NoError(t, err)
- return hdrSrc
- }
-
- cnr := addr.Container()
-
- unit := new(eaclSDK.ValidationUnit).
- WithContainerID(&cnr).
- WithOperation(eaclSDK.OperationHead).
- WithSenderKey(senderKey.Bytes()).
- WithEACLTable(table)
-
- validator := eaclSDK.NewValidator()
-
- checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
-
- meta.SetXHeaders(nil)
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- meta.SetXHeaders(xHdrs)
-
- obj.SetAttributes()
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- lStorage.err = errors.New("any error")
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- r.SetAction(eaclSDK.ActionAllow)
-
- rID := eaclSDK.NewRecord()
- rID.SetOperation(eaclSDK.OperationHead)
- rID.SetAction(eaclSDK.ActionDeny)
- rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
- eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
-
- table = eaclSDK.NewTable()
- table.AddRecord(r)
- table.AddRecord(rID)
-
- unit.WithEACLTable(table)
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-}
-
-func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
- actual, fromRule := v.CalculateAction(u)
- require.True(t, fromRule)
- require.Equal(t, expected, actual)
-}
-
-func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
- actual, fromRule := v.CalculateAction(u)
- require.False(t, fromRule)
- require.Equal(t, eaclSDK.ActionAllow, actual)
-}
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
deleted file mode 100644
index ecb793df8..000000000
--- a/pkg/services/object/acl/eacl/v2/headers.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package v2
-
-import (
- "context"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-type Option func(*cfg)
-
-type cfg struct {
- storage ObjectStorage
-
- msg XHeaderSource
-
- cnr cid.ID
- obj *oid.ID
-}
-
-type ObjectStorage interface {
- Head(context.Context, oid.Address) (*objectSDK.Object, error)
-}
-
-type Request interface {
- GetMetaHeader() *session.RequestMetaHeader
-}
-
-type Response interface {
- GetMetaHeader() *session.ResponseMetaHeader
-}
-
-type headerSource struct {
- requestHeaders []eaclSDK.Header
- objectHeaders []eaclSDK.Header
-
- incompleteObjectHeaders bool
-}
-
-func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) {
- cfg := &cfg{
- storage: os,
- cnr: cnrID,
- msg: xhs,
- }
-
- for i := range opts {
- opts[i](cfg)
- }
-
- if cfg.msg == nil {
- return nil, errors.New("message is not provided")
- }
-
- var res headerSource
-
- err := cfg.readObjectHeaders(&res)
- if err != nil {
- return nil, err
- }
-
- res.requestHeaders = cfg.msg.GetXHeaders()
-
- return res, nil
-}
-
-func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
- switch typ {
- default:
- return nil, true
- case eaclSDK.HeaderFromRequest:
- return h.requestHeaders, true
- case eaclSDK.HeaderFromObject:
- return h.objectHeaders, !h.incompleteObjectHeaders
- }
-}
-
-type xHeader session.XHeader
-
-func (x xHeader) Key() string {
- return (*session.XHeader)(&x).GetKey()
-}
-
-func (x xHeader) Value() string {
- return (*session.XHeader)(&x).GetValue()
-}
-
-var errMissingOID = errors.New("object ID is missing")
-
-func (h *cfg) readObjectHeaders(dst *headerSource) error {
- switch m := h.msg.(type) {
- default:
- panic(fmt.Sprintf("unexpected message type %T", h.msg))
- case requestXHeaderSource:
- return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
- case responseXHeaderSource:
- return h.readObjectHeadersResponseXHeaderSource(m, dst)
- }
-}
-
-func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
- switch req := m.req.(type) {
- case
- *objectV2.GetRequest,
- *objectV2.HeadRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
-
- dst.objectHeaders = objHeaders
- dst.incompleteObjectHeaders = !completed
- case
- *objectV2.GetRangeRequest,
- *objectV2.GetRangeHashRequest,
- *objectV2.DeleteRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- dst.objectHeaders = addressHeaders(h.cnr, h.obj)
- case *objectV2.PutRequest:
- if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
-
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.PutSingleRequest:
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj)
- case *objectV2.SearchRequest:
- cnrV2 := req.GetBody().GetContainerID()
- var cnr cid.ID
-
- if cnrV2 != nil {
- if err := cnr.ReadFromV2(*cnrV2); err != nil {
- return fmt.Errorf("can't parse container ID: %w", err)
- }
- }
-
- dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
- }
- return nil
-}
-
-func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
- switch resp := m.resp.(type) {
- default:
- objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
-
- dst.objectHeaders = objectHeaders
- dst.incompleteObjectHeaders = !completed
- case *objectV2.GetResponse:
- if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
-
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.HeadResponse:
- oV2 := new(objectV2.Object)
-
- var hdr *objectV2.Header
-
- switch v := resp.GetBody().GetHeaderPart().(type) {
- case *objectV2.ShortHeader:
- hdr = new(objectV2.Header)
-
- var idV2 refsV2.ContainerID
- h.cnr.WriteToV2(&idV2)
-
- hdr.SetContainerID(&idV2)
- hdr.SetVersion(v.GetVersion())
- hdr.SetCreationEpoch(v.GetCreationEpoch())
- hdr.SetOwnerID(v.GetOwnerID())
- hdr.SetObjectType(v.GetObjectType())
- hdr.SetPayloadLength(v.GetPayloadLength())
- case *objectV2.HeaderWithSignature:
- hdr = v.GetHeader()
- }
-
- oV2.SetHeader(hdr)
-
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
- }
- return nil
-}
-
-func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
- if idObj != nil {
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(*idObj)
-
- obj, err := h.storage.Head(context.TODO(), addr)
- if err == nil {
- return headersFromObject(obj, cnr, idObj), true
- }
- }
-
- return addressHeaders(cnr, idObj), false
-}
-
-func cidHeader(idCnr cid.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectContainerID,
- v: idCnr.EncodeToString(),
- }
-}
-
-func oidHeader(obj oid.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectID,
- v: obj.EncodeToString(),
- }
-}
-
-func ownerIDHeader(ownerID user.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectOwnerID,
- v: ownerID.EncodeToString(),
- }
-}
-
-func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
- hh := make([]eaclSDK.Header, 0, 2)
- hh = append(hh, cidHeader(cnr))
-
- if oid != nil {
- hh = append(hh, oidHeader(*oid))
- }
-
- return hh
-}
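
Aside (not part of the patch): everything the deleted header sources produced reduces to key/value pairs matching the eaclSDK.Header interface, exactly as sysObjHdr did. A dependency-free sketch (key strings are illustrative placeholders):

package main

import "fmt"

// kv mirrors the removed sysObjHdr: any Key/Value string pair satisfies
// the Header interface that eACL filters are matched against.
type kv struct{ k, v string }

func (h kv) Key() string   { return h.k }
func (h kv) Value() string { return h.v }

func main() {
    hdrs := []kv{
        {k: "container-id", v: "<cid placeholder>"},
        {k: "object-id", v: "<oid placeholder>"},
    }
    for _, h := range hdrs {
        fmt.Printf("%s = %s\n", h.Key(), h.Value())
    }
}
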
diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go
deleted file mode 100644
index 92570a3c5..000000000
--- a/pkg/services/object/acl/eacl/v2/object.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package v2
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type sysObjHdr struct {
- k, v string
-}
-
-func (s sysObjHdr) Key() string {
- return s.k
-}
-
-func (s sysObjHdr) Value() string {
- return s.v
-}
-
-func u64Value(v uint64) string {
- return strconv.FormatUint(v, 10)
-}
-
-func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
- var count int
- for obj := obj; obj != nil; obj = obj.Parent() {
- count += 9 + len(obj.Attributes())
- }
-
- res := make([]eaclSDK.Header, 0, count)
- for ; obj != nil; obj = obj.Parent() {
- res = append(res,
- cidHeader(cnr),
- // creation epoch
- sysObjHdr{
- k: acl.FilterObjectCreationEpoch,
- v: u64Value(obj.CreationEpoch()),
- },
- // payload size
- sysObjHdr{
- k: acl.FilterObjectPayloadLength,
- v: u64Value(obj.PayloadSize()),
- },
- // object version
- sysObjHdr{
- k: acl.FilterObjectVersion,
- v: obj.Version().String(),
- },
- // object type
- sysObjHdr{
- k: acl.FilterObjectType,
- v: obj.Type().String(),
- },
- )
-
- if oid != nil {
- res = append(res, oidHeader(*oid))
- }
-
- if idOwner := obj.OwnerID(); !idOwner.IsEmpty() {
- res = append(res, ownerIDHeader(idOwner))
- }
-
- cs, ok := obj.PayloadChecksum()
- if ok {
- res = append(res, sysObjHdr{
- k: acl.FilterObjectPayloadHash,
- v: cs.String(),
- })
- }
-
- cs, ok = obj.PayloadHomomorphicHash()
- if ok {
- res = append(res, sysObjHdr{
- k: acl.FilterObjectHomomorphicHash,
- v: cs.String(),
- })
- }
-
- attrs := obj.Attributes()
- for i := range attrs {
- res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface
- }
- }
-
- return res
-}
diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go
deleted file mode 100644
index d91a21c75..000000000
--- a/pkg/services/object/acl/eacl/v2/opts.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package v2
-
-import (
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func WithOID(v *oid.ID) Option {
- return func(c *cfg) {
- c.obj = v
- }
-}
diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go
deleted file mode 100644
index ce380c117..000000000
--- a/pkg/services/object/acl/eacl/v2/xheader.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-)
-
-type XHeaderSource interface {
- GetXHeaders() []eaclSDK.Header
-}
-
-type requestXHeaderSource struct {
- req Request
-}
-
-func NewRequestXHeaderSource(req Request) XHeaderSource {
- return requestXHeaderSource{req: req}
-}
-
-type responseXHeaderSource struct {
- resp Response
-
- req Request
-}
-
-func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource {
- return responseXHeaderSource{resp: resp, req: req}
-}
-
-func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
- ln := 0
-
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- ln += len(meta.GetXHeaders())
- }
-
- res := make([]eaclSDK.Header, 0, ln)
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- x := meta.GetXHeaders()
- for i := range x {
- res = append(res, (xHeader)(x[i]))
- }
- }
-
- return res
-}
-
-func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
- ln := 0
- xHdrs := make([][]session.XHeader, 0)
-
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- x := meta.GetXHeaders()
-
- ln += len(x)
-
- xHdrs = append(xHdrs, x)
- }
-
- res := make([]eaclSDK.Header, 0, ln)
-
- for i := range xHdrs {
- for j := range xHdrs[i] {
- res = append(res, xHeader(xHdrs[i][j]))
- }
- }
-
- return res
-}
diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go
deleted file mode 100644
index 11b9e6e5f..000000000
--- a/pkg/services/object/acl/v2/errors.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package v2
-
-import (
- "fmt"
-
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-const invalidRequestMessage = "malformed request"
-
-func malformedRequestError(reason string) error {
- return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
-}
-
-var (
- errEmptyBody = malformedRequestError("empty body")
- errEmptyVerificationHeader = malformedRequestError("empty verification header")
- errEmptyBodySig = malformedRequestError("empty body signature")
- errInvalidSessionSig = malformedRequestError("invalid session token signature")
- errInvalidSessionOwner = malformedRequestError("invalid session token owner")
- errInvalidVerb = malformedRequestError("session token verb is invalid")
-)
-
-const (
- accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check"
- accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v"
-)
-
-func basicACLErr(info RequestInfo) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation))
-
- return errAccessDenied
-}
-
-func eACLErr(info RequestInfo, err error) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err))
-
- return errAccessDenied
-}
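
Aside (not part of the patch): the deleted helpers wrapped denials in apistatus.ObjectAccessDenied so that callers can branch on the status type; the pattern, using only calls visible in the deleted code:

package main

import (
    "errors"
    "fmt"

    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

func deny(op string) error {
    errAccessDenied := &apistatus.ObjectAccessDenied{}
    errAccessDenied.WriteReason(fmt.Sprintf("access to operation %s is denied by basic ACL check", op))
    return errAccessDenied
}

func main() {
    err := deny("GET")

    var accessDenied *apistatus.ObjectAccessDenied
    fmt.Println(errors.As(err, &accessDenied)) // true: the status type survives as an error
}
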
diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go
deleted file mode 100644
index 2d2b7bc8d..000000000
--- a/pkg/services/object/acl/v2/errors_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package v2
-
-import (
- "errors"
- "testing"
-
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "github.com/stretchr/testify/require"
-)
-
-func TestBasicACLErr(t *testing.T) {
- var reqInfo RequestInfo
- err := basicACLErr(reqInfo)
-
- var errAccessDenied *apistatus.ObjectAccessDenied
-
- require.ErrorAs(t, err, &errAccessDenied,
- "basicACLErr must be able to be casted to apistatus.ObjectAccessDenied")
-}
-
-func TestEACLErr(t *testing.T) {
- var reqInfo RequestInfo
- testErr := errors.New("test-eacl")
- err := eACLErr(reqInfo, testErr)
-
- var errAccessDenied *apistatus.ObjectAccessDenied
-
- require.ErrorAs(t, err, &errAccessDenied,
- "eACLErr must be able to be casted to apistatus.ObjectAccessDenied")
-}
diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go
deleted file mode 100644
index 15fcce884..000000000
--- a/pkg/services/object/acl/v2/opts.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// WithLogger returns option to set logger.
-func WithLogger(v *logger.Logger) Option {
- return func(c *cfg) {
- c.log = v
- }
-}
diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go
deleted file mode 100644
index e35cd2e11..000000000
--- a/pkg/services/object/acl/v2/request.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package v2
-
-import (
- "crypto/ecdsa"
- "fmt"
-
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// RequestInfo groups parsed version-independent (from SDK library)
-// request information and raw API request.
-type RequestInfo struct {
- basicACL acl.Basic
- requestRole acl.Role
- operation acl.Op // put, get, head, etc.
- cnrOwner user.ID // container owner
-
- // cnrNamespace defines the namespace to which the container belongs.
- cnrNamespace string
-
- idCnr cid.ID
-
- // optional for some requests,
- // e.g. Put, Search
- obj *oid.ID
-
- senderKey []byte
-
- bearer *bearer.Token // bearer token of request
-
- srcRequest any
-}
-
-func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
- r.basicACL = basicACL
-}
-
-func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
- r.requestRole = requestRole
-}
-
-func (r *RequestInfo) SetSenderKey(senderKey []byte) {
- r.senderKey = senderKey
-}
-
-// Request returns raw API request.
-func (r RequestInfo) Request() any {
- return r.srcRequest
-}
-
-// ContainerOwner returns the owner of the container.
-func (r RequestInfo) ContainerOwner() user.ID {
- return r.cnrOwner
-}
-
-func (r RequestInfo) ContainerNamespace() string {
- return r.cnrNamespace
-}
-
-// ObjectID returns the object ID.
-func (r RequestInfo) ObjectID() *oid.ID {
- return r.obj
-}
-
-// ContainerID returns the container ID.
-func (r RequestInfo) ContainerID() cid.ID {
- return r.idCnr
-}
-
-// CleanBearer forces cleaning bearer token information.
-func (r *RequestInfo) CleanBearer() {
- r.bearer = nil
-}
-
-// Bearer returns bearer token of the request.
-func (r RequestInfo) Bearer() *bearer.Token {
- return r.bearer
-}
-
-// BasicACL returns basic ACL of the container.
-func (r RequestInfo) BasicACL() acl.Basic {
- return r.basicACL
-}
-
-// SenderKey returns public key of the request's sender.
-func (r RequestInfo) SenderKey() []byte {
- return r.senderKey
-}
-
-// Operation returns request's operation.
-func (r RequestInfo) Operation() acl.Op {
- return r.operation
-}
-
-// RequestRole returns request sender's role.
-func (r RequestInfo) RequestRole() acl.Role {
- return r.requestRole
-}
-
-// IsSoftAPECheck states if APE should perform soft checks.
-// Soft APE check allows a request if CheckAPE returns NoRuleFound for it,
-// otherwise it denies the request.
-func (r RequestInfo) IsSoftAPECheck() bool {
- return r.BasicACL().Bits() != 0
-}
-
-// MetaWithToken groups session and bearer tokens,
-// verification header and raw API request.
-type MetaWithToken struct {
- vheader *sessionV2.RequestVerificationHeader
- token *sessionSDK.Object
- bearer *bearer.Token
- src any
-}
-
-// RequestOwner returns ownerID and its public key
-// according to internal meta information.
-func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
- if r.vheader == nil {
- return nil, nil, errEmptyVerificationHeader
- }
-
- if r.bearer != nil && r.bearer.Impersonate() {
- return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes())
- }
-
- // if a session token is present, use it as the source of truth
- if r.token != nil {
- // verify signature of session token
- return ownerFromToken(r.token)
- }
-
- // otherwise get original body signature
- bodySignature := originalBodySignature(r.vheader)
- if bodySignature == nil {
- return nil, nil, errEmptyBodySig
- }
-
- return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
-}
-
-func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
- key, err := unmarshalPublicKey(rawKey)
- if err != nil {
- return nil, nil, fmt.Errorf("invalid signature key: %w", err)
- }
-
- var idSender user.ID
- user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
-
- return &idSender, key, nil
-}
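
Aside (not part of the patch): deriving the owner ID from a raw public key, as unmarshalPublicKeyWithOwner above did, is a short SDK call chain. A runnable sketch using the same conversions as the deleted code:

package main

import (
    "crypto/ecdsa"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
    priv, err := keys.NewPrivateKey()
    if err != nil {
        panic(err)
    }

    // Same conversion chain the removed code used: neo-go key ->
    // ecdsa.PublicKey -> user.ID.
    var id user.ID
    user.IDFromKey(&id, (ecdsa.PublicKey)(*priv.PublicKey()))

    fmt.Println("request owner:", id.EncodeToString())
}
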
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
deleted file mode 100644
index e02a3be36..000000000
--- a/pkg/services/object/acl/v2/service.go
+++ /dev/null
@@ -1,919 +0,0 @@
-package v2
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "go.uber.org/zap"
-)
-
-// Service checks basic ACL rules.
-type Service struct {
- *cfg
-
- c objectCore.SenderClassifier
-}
-
-type putStreamBasicChecker struct {
- source *Service
- next object.PutObjectStream
-}
-
-type patchStreamBasicChecker struct {
- source *Service
- next object.PatchObjectStream
- nonFirstSend bool
-}
-
-type getStreamBasicChecker struct {
- checker ACLChecker
-
- object.GetObjectStream
-
- info RequestInfo
-}
-
-type rangeStreamBasicChecker struct {
- checker ACLChecker
-
- object.GetObjectRangeStream
-
- info RequestInfo
-}
-
-type searchStreamBasicChecker struct {
- checker ACLChecker
-
- object.SearchStream
-
- info RequestInfo
-}
-
-// Option represents Service constructor option.
-type Option func(*cfg)
-
-type cfg struct {
- log *logger.Logger
-
- containers container.Source
-
- checker ACLChecker
-
- irFetcher InnerRingFetcher
-
- nm netmap.Source
-
- next object.ServiceServer
-}
-
-// New is a constructor for object ACL checking service.
-func New(next object.ServiceServer,
- nm netmap.Source,
- irf InnerRingFetcher,
- acl ACLChecker,
- cs container.Source,
- opts ...Option,
-) Service {
- cfg := &cfg{
- log: &logger.Logger{Logger: zap.L()},
- next: next,
- nm: nm,
- irFetcher: irf,
- checker: acl,
- containers: cs,
- }
-
- for i := range opts {
- opts[i](cfg)
- }
-
- return Service{
- cfg: cfg,
- c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
- }
-}
-
-// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context.
-// This allows retrieving already calculated immutable request-specific values in the next handler invocation.
-type wrappedGetObjectStream struct {
- object.GetObjectStream
-
- requestInfo RequestInfo
-}
-
-func (w *wrappedGetObjectStream) Context() context.Context {
- return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{
- Namespace: w.requestInfo.ContainerNamespace(),
- ContainerOwner: w.requestInfo.ContainerOwner(),
- SenderKey: w.requestInfo.SenderKey(),
- Role: w.requestInfo.RequestRole(),
- SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
- BearerToken: w.requestInfo.Bearer(),
- })
-}
-
-func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream {
- return &wrappedGetObjectStream{
- GetObjectStream: getObjectStream,
- requestInfo: reqInfo,
- }
-}
-
-// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context.
-// This allows retrieving already calculated immutable request-specific values in the next handler invocation.
-type wrappedRangeStream struct {
- object.GetObjectRangeStream
-
- requestInfo RequestInfo
-}
-
-func (w *wrappedRangeStream) Context() context.Context {
- return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{
- Namespace: w.requestInfo.ContainerNamespace(),
- ContainerOwner: w.requestInfo.ContainerOwner(),
- SenderKey: w.requestInfo.SenderKey(),
- Role: w.requestInfo.RequestRole(),
- SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
- BearerToken: w.requestInfo.Bearer(),
- })
-}
-
-func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream {
- return &wrappedRangeStream{
- GetObjectRangeStream: rangeStream,
- requestInfo: reqInfo,
- }
-}
-
-// wrappedSearchStream propagates RequestContext into SearchStream's context.
-// This allows retrieving already calculated immutable request-specific values in the next handler invocation.
-type wrappedSearchStream struct {
- object.SearchStream
-
- requestInfo RequestInfo
-}
-
-func (w *wrappedSearchStream) Context() context.Context {
- return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{
- Namespace: w.requestInfo.ContainerNamespace(),
- ContainerOwner: w.requestInfo.ContainerOwner(),
- SenderKey: w.requestInfo.SenderKey(),
- Role: w.requestInfo.RequestRole(),
- SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
- BearerToken: w.requestInfo.Bearer(),
- })
-}
-
-func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream {
- return &wrappedSearchStream{
- SearchStream: searchStream,
- requestInfo: reqInfo,
- }
-}
-
-// Get implements ServiceServer interface, makes ACL checks and calls
-// next Get method in the ServiceServer pipeline.
-func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
- }
-
- return b.next.Get(request, &getStreamBasicChecker{
- GetObjectStream: newWrappedGetObjectStreamStream(stream, reqInfo),
- info: reqInfo,
- checker: b.checker,
- })
-}
-
-func (b Service) Put() (object.PutObjectStream, error) {
- streamer, err := b.next.Put()
-
- return putStreamBasicChecker{
- source: &b,
- next: streamer,
- }, err
-}
-
-func (b Service) Patch() (object.PatchObjectStream, error) {
- streamer, err := b.next.Patch()
-
- return &patchStreamBasicChecker{
- source: &b,
- next: streamer,
- }, err
-}
-
-func (b Service) Head(
- ctx context.Context,
- request *objectV2.HeadRequest,
-) (*objectV2.HeadResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- resp, err := b.next.Head(requestContext(ctx, reqInfo), request)
- if err == nil {
- if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
- err = eACLErr(reqInfo, err)
- }
- }
-
- return resp, err
-}
-
-func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
- id, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, id, nil)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch)
- if err != nil {
- return err
- }
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
- }
-
- return b.next.Search(request, &searchStreamBasicChecker{
- checker: b.checker,
- SearchStream: newWrappedSearchStream(stream, reqInfo),
- info: reqInfo,
- })
-}
-
-func (b Service) Delete(
- ctx context.Context,
- request *objectV2.DeleteRequest,
-) (*objectV2.DeleteResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- return b.next.Delete(requestContext(ctx, reqInfo), request)
-}
-
-func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
- }
-
- return b.next.GetRange(request, &rangeStreamBasicChecker{
- checker: b.checker,
- GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo),
- info: reqInfo,
- })
-}
-
-func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context {
- return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{
- Namespace: reqInfo.ContainerNamespace(),
- ContainerOwner: reqInfo.ContainerOwner(),
- SenderKey: reqInfo.SenderKey(),
- Role: reqInfo.RequestRole(),
- SoftAPECheck: reqInfo.IsSoftAPECheck(),
- BearerToken: reqInfo.Bearer(),
- })
-}
-
-func (b Service) GetRangeHash(
- ctx context.Context,
- request *objectV2.GetRangeHashRequest,
-) (*objectV2.GetRangeHashResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- return b.next.GetRangeHash(requestContext(ctx, reqInfo), request)
-}
-
-func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID()
- if idV2 == nil {
- return nil, errors.New("missing object owner")
- }
-
- var idOwner user.ID
-
- err = idOwner.ReadFromV2(*idV2)
- if err != nil {
- return nil, fmt.Errorf("invalid object owner: %w", err)
- }
-
- obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID())
- if err != nil {
- return nil, err
- }
-
- var sTok *sessionSDK.Object
- sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
- if err != nil {
- return nil, err
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) || !b.checker.StickyBitCheck(reqInfo, idOwner) {
- return nil, basicACLErr(reqInfo)
- }
- if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- return b.next.PutSingle(requestContext(ctx, reqInfo), request)
-}
-
-func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
- body := request.GetBody()
- if body == nil {
- return errEmptyBody
- }
-
- part := body.GetObjectPart()
- if part, ok := part.(*objectV2.PutObjectPartInit); ok {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- idV2 := part.GetHeader().GetOwnerID()
- if idV2 == nil {
- return errors.New("missing object owner")
- }
-
- var idOwner user.ID
-
- err = idOwner.ReadFromV2(*idV2)
- if err != nil {
- return fmt.Errorf("invalid object owner: %w", err)
- }
-
- objV2 := part.GetObjectID()
- var obj *oid.ID
-
- if objV2 != nil {
- obj = new(oid.ID)
-
- err = obj.ReadFromV2(*objV2)
- if err != nil {
- return err
- }
- }
-
- var sTok *sessionSDK.Object
- sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
- if err != nil {
- return err
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) {
- return basicACLErr(reqInfo)
- }
- }
-
- ctx = requestContext(ctx, reqInfo)
- }
-
- return p.next.Send(ctx, request)
-}
-
-func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
- var sTok *sessionSDK.Object
-
- if tokV2 != nil {
- sTok = new(sessionSDK.Object)
-
- err := sTok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
- // if the session relates to the object's removal, we don't check the
- // relation of the tombstone to the session here since the user
- // can't predict the tombstone's ID.
- err = assertSessionRelation(*sTok, cnr, nil)
- } else {
- err = assertSessionRelation(*sTok, cnr, obj)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- return sTok, nil
-}
-
-func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
- if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
- }
-
- return g.GetObjectStream.Send(resp)
-}
-
-func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
-
- return g.GetObjectRangeStream.Send(resp)
-}
-
-func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
-
- return g.SearchStream.Send(resp)
-}
-
-func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
- body := request.GetBody()
- if body == nil {
- return errEmptyBody
- }
-
- if !p.nonFirstSend {
- p.nonFirstSend = true
-
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- objV2 := request.GetBody().GetAddress().GetObjectID()
- if objV2 == nil {
- return errors.New("missing oid")
- }
- obj := new(oid.ID)
- err = obj.ReadFromV2(*objV2)
- if err != nil {
- return err
- }
-
- var sTok *sessionSDK.Object
- sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
- if err != nil {
- return err
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(req, cnr)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- ctx = requestContext(ctx, reqInfo)
- }
-
- return p.next.Send(ctx, request)
-}
-
-func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
- cnr, err := b.containers.Get(idCnr) // fetch actual container
- if err != nil {
- return info, err
- }
-
- if req.token != nil {
- currentEpoch, err := b.nm.Epoch()
- if err != nil {
- return info, errors.New("can't fetch current epoch")
- }
- if req.token.ExpiredAt(currentEpoch) {
- return info, new(apistatus.SessionTokenExpired)
- }
- if req.token.InvalidAt(currentEpoch) {
- return info, fmt.Errorf("%s: token is invalid at epoch %d",
- invalidRequestMessage, currentEpoch)
- }
-
- if !assertVerb(*req.token, op) {
- return info, errInvalidVerb
- }
- }
-
- // find request role and key
- ownerID, ownerKey, err := req.RequestOwner()
- if err != nil {
- return info, err
- }
- res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
- if err != nil {
- return info, err
- }
-
- info.basicACL = cnr.Value.BasicACL()
- info.requestRole = res.Role
- info.operation = op
- info.cnrOwner = cnr.Value.Owner()
- info.idCnr = idCnr
-
- cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
- if hasNamespace {
- info.cnrNamespace = cnrNamespace
- }
-
- // it is assumed that at the moment the key will be valid,
- // otherwise the request would not pass validation
- info.senderKey = res.Key
-
- // add bearer token if it is present in request
- info.bearer = req.bearer
-
- info.srcRequest = req.src
-
- return info, nil
-}
-
-// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert.
-func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
- cnr, err := b.containers.Get(idCnr) // fetch actual container
- if err != nil {
- return info, err
- }
-
- if req.token != nil {
- currentEpoch, err := b.nm.Epoch()
- if err != nil {
- return info, errors.New("can't fetch current epoch")
- }
- if req.token.ExpiredAt(currentEpoch) {
- return info, new(apistatus.SessionTokenExpired)
- }
- if req.token.InvalidAt(currentEpoch) {
- return info, fmt.Errorf("%s: token is invalid at epoch %d",
- invalidRequestMessage, currentEpoch)
- }
- }
-
- // find request role and key
- ownerID, ownerKey, err := req.RequestOwner()
- if err != nil {
- return info, err
- }
- res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
- if err != nil {
- return info, err
- }
-
- info.basicACL = cnr.Value.BasicACL()
- info.requestRole = res.Role
- info.cnrOwner = cnr.Value.Owner()
- info.idCnr = idCnr
-
- cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
- if hasNamespace {
- info.cnrNamespace = cnrNamespace
- }
-
- // it is assumed that at the moment the key will be valid,
- // otherwise the request would not pass validation
- info.senderKey = res.Key
-
- // add bearer token if it is present in request
- info.bearer = req.bearer
-
- info.srcRequest = req.src
-
- return info, nil
-}
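
Aside (not part of the patch): the wrapped streams above all rely on the same context.WithValue pattern to hand a precomputed RequestContext to downstream handlers. A dependency-free sketch of that mechanism with illustrative types:

package main

import (
    "context"
    "fmt"
)

// ctxKey is unexported so only this package can set or read the value,
// matching how object.RequestContextKey scopes access.
type ctxKey struct{}

// reqCtx stands in for object.RequestContext: immutable request-scoped
// values computed once by the ACL layer.
type reqCtx struct {
    Namespace string
    SoftCheck bool
}

func withReqCtx(ctx context.Context, rc *reqCtx) context.Context {
    return context.WithValue(ctx, ctxKey{}, rc)
}

func main() {
    ctx := withReqCtx(context.Background(), &reqCtx{Namespace: "ns1", SoftCheck: true})

    // A downstream handler reads the values back without recomputing them.
    if rc, ok := ctx.Value(ctxKey{}).(*reqCtx); ok {
        fmt.Println(rc.Namespace, rc.SoftCheck)
    }
}
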
diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go
deleted file mode 100644
index 061cd26b6..000000000
--- a/pkg/services/object/acl/v2/types.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-// ACLChecker is an interface that must provide
-// ACL related checks.
-type ACLChecker interface {
- // CheckBasicACL must return true only if request
- // passes basic ACL validation.
- CheckBasicACL(RequestInfo) bool
- // CheckEACL must return non-nil error if request
- // doesn't pass extended ACL validation.
- CheckEACL(any, RequestInfo) error
- // StickyBitCheck must return true only if sticky bit
- // is disabled or enabled but request contains correct
- // owner field.
- StickyBitCheck(RequestInfo, user.ID) bool
-}
-
-// InnerRingFetcher is an interface that must provide
-// Inner Ring information.
-type InnerRingFetcher interface {
- // InnerRingKeys must return the list of public keys of
- // the current inner ring.
- InnerRingKeys() ([][]byte, error)
-}
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
deleted file mode 100644
index 4b19cecfe..000000000
--- a/pkg/services/object/acl/v2/util_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package v2
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
- aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestOriginalTokens(t *testing.T) {
- sToken := sessiontest.ObjectSigned()
- bToken := bearertest.Token()
-
- pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- require.NoError(t, bToken.Sign(*pk))
-
- var bTokenV2 acl.BearerToken
- bToken.WriteToV2(&bTokenV2)
- // This line is needed because SDK uses some custom format for
- // reserved filters, so `cid.ID` is not converted to string immediately.
- require.NoError(t, bToken.ReadFromV2(bTokenV2))
-
- var sTokenV2 session.Token
- sToken.WriteToV2(&sTokenV2)
-
- for i := range 10 {
- metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
- res, err := originalSessionToken(metaHeaders)
- require.NoError(t, err)
- require.Equal(t, sToken, res, i)
-
- bTok, err := originalBearerToken(metaHeaders)
- require.NoError(t, err)
- require.Equal(t, &bToken, bTok, i)
- }
-}
-
-func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
- metaHeader := new(session.RequestMetaHeader)
- metaHeader.SetBearerToken(b)
- metaHeader.SetSessionToken(s)
-
- for range depth {
- link := metaHeader
- metaHeader = new(session.RequestMetaHeader)
- metaHeader.SetOrigin(link)
- }
-
- return metaHeader
-}
-
-func TestIsVerbCompatible(t *testing.T) {
- // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
- table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
- aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
- aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
- aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet},
- aclsdk.OpObjectHead: {
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- },
- aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
- aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash},
- aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
- }
-
- verbs := []sessionSDK.ObjectVerb{
- sessionSDK.VerbObjectPut,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectSearch,
- }
-
- var tok sessionSDK.Object
-
- for op, list := range table {
- for _, verb := range verbs {
- var contains bool
- for _, v := range list {
- if v == verb {
- contains = true
- break
- }
- }
-
- tok.ForVerb(verb)
-
- require.Equal(t, contains, assertVerb(tok, op),
- "%v in token, %s executing", verb, op)
- }
- }
-}
-
-func TestAssertSessionRelation(t *testing.T) {
- var tok sessionSDK.Object
- cnr := cidtest.ID()
- cnrOther := cidtest.ID()
- obj := oidtest.ID()
- objOther := oidtest.ID()
-
- // make sure ids differ, otherwise test won't work correctly
- require.False(t, cnrOther.Equals(cnr))
- require.False(t, objOther.Equals(obj))
-
- // bind session to the container (required)
- tok.BindContainer(cnr)
-
- // test container-global session
- require.NoError(t, assertSessionRelation(tok, cnr, nil))
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnrOther, nil))
- require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
-
- // limit the session to the particular object
- tok.LimitByObjects(obj)
-
- // test fixed object session (here obj arg must be non-nil everywhere)
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnr, &objOther))
-}
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index abcd2f4bb..bb6067a37 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -64,8 +64,8 @@ type Prm struct {
// An encoded container's owner user ID.
ContainerOwner user.ID
- // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
- SoftAPECheck bool
+ // Attributes defined for the container.
+ ContainerAttributes map[string]string
// The request's bearer token. It is used in order to check APE overrides with the token.
BearerToken *bearer.Token
@@ -79,9 +79,10 @@ var errMissingOID = errors.New("object ID is not set")
// CheckAPE prepares an APE-request and checks if it is permitted by policies.
func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
// APE check is ignored for some inter-node requests.
- if prm.Role == nativeschema.PropertyValueContainerRoleContainer {
+ switch prm.Role {
+ case nativeschema.PropertyValueContainerRoleContainer:
return nil
- } else if prm.Role == nativeschema.PropertyValueContainerRoleIR {
+ case nativeschema.PropertyValueContainerRoleIR:
switch prm.Method {
case nativeschema.MethodGetObject,
nativeschema.MethodHeadObject,
@@ -102,13 +103,12 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
return err
}
- return c.checkerCore.CheckAPE(checkercore.CheckPrm{
+ return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{
Request: r,
PublicKey: pub,
Namespace: prm.Namespace,
Container: prm.Container,
ContainerOwner: prm.ContainerOwner,
BearerToken: prm.BearerToken,
- SoftAPECheck: prm.SoftAPECheck,
})
}
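
With SoftAPECheck removed, callers feed the checker a snapshot of the container's attributes instead. A sketch of the copy discipline the extractor applies below — a plain map copy, so the checker never aliases SDK-owned state (names here are illustrative):

package main

import "fmt"

// snapshotAttrs copies container attributes into a fresh map, the same
// way the extractor fills RequestInfo.ContainerAttributes.
func snapshotAttrs(src map[string]string) map[string]string {
	dst := make(map[string]string, len(src))
	for k, v := range src {
		dst[k] = v
	}
	return dst
}

func main() {
	cnr := map[string]string{"Name": "unittest", "Zone": "eggplant"}
	prm := snapshotAttrs(cnr)
	cnr["Name"] = "mutated"
	fmt.Println(prm["Name"]) // unittest: the checker's view stays stable
}
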
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
index e03b5750c..97eb2b2d7 100644
--- a/pkg/services/object/ape/checker_test.go
+++ b/pkg/services/object/ape/checker_test.go
@@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
return pk.GetScriptHash()
}
-func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) {
+func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
v, ok := f.subjects[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e
return v, nil
}
-func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
v, ok := f.subjectsExtended[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -619,21 +619,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(s.currentEpoch - diff)
+ return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
}
-func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch() (uint64, error) {
+func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
return s.currentEpoch, nil
}
@@ -641,14 +641,14 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container
}
-func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
-func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
return nil, nil
}
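
The stub updates above are purely mechanical, but a context-aware stub can also honour cancellation, so tests cover the same failure path a remote source would hit. A hedged sketch with the interface reduced to one method:

package main

import (
	"context"
	"errors"
	"fmt"
)

type epochSource interface {
	Epoch(ctx context.Context) (uint64, error)
}

type stub struct{ current uint64 }

func (s *stub) Epoch(ctx context.Context) (uint64, error) {
	// Respect cancellation even though the answer is in memory,
	// so tests exercise the same code path as a remote source.
	if err := ctx.Err(); err != nil {
		return 0, err
	}
	return s.current, nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := (&stub{current: 42}).Epoch(ctx)
	fmt.Println(errors.Is(err, context.Canceled)) // true
}
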
diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
index 1b2024ed5..82e660a7f 100644
--- a/pkg/services/object/ape/errors.go
+++ b/pkg/services/object/ape/errors.go
@@ -1,10 +1,34 @@
package ape
import (
+ "errors"
+
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
+var (
+ errMissingContainerID = malformedRequestError("missing container ID")
+ errEmptyVerificationHeader = malformedRequestError("empty verification header")
+	errEmptyBodySig            = malformedRequestError("empty body signature")
+ errInvalidSessionSig = malformedRequestError("invalid session token signature")
+ errInvalidSessionOwner = malformedRequestError("invalid session token owner")
+ errInvalidVerb = malformedRequestError("session token verb is invalid")
+)
+
+func malformedRequestError(reason string) error {
+ invalidArgErr := &apistatus.InvalidArgument{}
+ invalidArgErr.SetMessage(reason)
+ return invalidArgErr
+}
+
func toStatusErr(err error) error {
+ var chRouterErr *checkercore.ChainRouterError
+ if !errors.As(err, &chRouterErr) {
+ errServerInternal := &apistatus.ServerInternal{}
+ apistatus.WriteInternalServerErr(errServerInternal, err)
+ return errServerInternal
+ }
errAccessDenied := &apistatus.ObjectAccessDenied{}
errAccessDenied.WriteReason("ape denied request: " + err.Error())
return errAccessDenied
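
The new toStatusErr collapses everything that is not a chain-router verdict into an internal server error, and only router verdicts become access-denied statuses. A self-contained sketch of the same shape (types simplified, not the SDK's status machinery):

package main

import (
	"errors"
	"fmt"
)

type chainRouterError struct{ msg string }

func (e *chainRouterError) Error() string { return e.msg }

// toStatus branches on errors.As, exactly like toStatusErr above.
func toStatus(err error) string {
	var routerErr *chainRouterError
	if !errors.As(err, &routerErr) {
		return "internal server error: " + err.Error()
	}
	return "access denied: ape denied request: " + err.Error()
}

func main() {
	fmt.Println(toStatus(errors.New("db timeout")))
	fmt.Println(toStatus(fmt.Errorf("wrapped: %w", &chainRouterError{msg: "no rule"})))
}
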
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
new file mode 100644
index 000000000..102985aa6
--- /dev/null
+++ b/pkg/services/object/ape/metadata.go
@@ -0,0 +1,179 @@
+package ape
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+type Metadata struct {
+ Container cid.ID
+ Object *oid.ID
+ MetaHeader *session.RequestMetaHeader
+ VerificationHeader *session.RequestVerificationHeader
+ SessionToken *sessionSDK.Object
+ BearerToken *bearer.Token
+}
+
+func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+ if m.VerificationHeader == nil {
+ return nil, nil, errEmptyVerificationHeader
+ }
+
+ if m.BearerToken != nil && m.BearerToken.Impersonate() {
+ return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
+ }
+
+	// if a session token is present, use it as the source of truth
+ if m.SessionToken != nil {
+ // verify signature of session token
+ return ownerFromToken(m.SessionToken)
+ }
+
+ // otherwise get original body signature
+ bodySignature := originalBodySignature(m.VerificationHeader)
+ if bodySignature == nil {
+ return nil, nil, errEmptyBodySig
+ }
+
+ return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+// RequestInfo contains request information extracted from request metadata.
+type RequestInfo struct {
+	// Role defines under which role this request is executed.
+	// It must be one of the role constants defined in the native schema.
+ Role string
+
+ ContainerOwner user.ID
+
+ ContainerAttributes map[string]string
+
+	// Namespace defines the namespace the container belongs to.
+ Namespace string
+
+ // HEX-encoded sender key.
+ SenderKey string
+}
+
+type RequestInfoExtractor interface {
+ GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
+}
+
+type extractor struct {
+ containers container.Source
+
+ nm netmap.Source
+
+ classifier objectCore.SenderClassifier
+}
+
+func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
+ return &extractor{
+ containers: containers,
+ nm: nm,
+ classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
+ }
+}
+
+func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
+ currentEpoch, err := e.nm.Epoch(ctx)
+ if err != nil {
+ return errors.New("can't fetch current epoch")
+ }
+ if sessionToken.ExpiredAt(currentEpoch) {
+ return new(apistatus.SessionTokenExpired)
+ }
+ if sessionToken.InvalidAt(currentEpoch) {
+ return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch)
+ }
+ if !assertVerb(*sessionToken, method) {
+ return errInvalidVerb
+ }
+ return nil
+}
+
+func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
+ cnr, err := e.containers.Get(ctx, m.Container)
+ if err != nil {
+ return ri, err
+ }
+
+ if m.SessionToken != nil {
+ if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
+ return ri, err
+ }
+ }
+
+ ownerID, ownerKey, err := m.RequestOwner()
+ if err != nil {
+ return ri, err
+ }
+ res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
+ if err != nil {
+ return ri, err
+ }
+
+ ri.Role = nativeSchemaRole(res.Role)
+ ri.ContainerOwner = cnr.Value.Owner()
+
+ ri.ContainerAttributes = map[string]string{}
+ for key, val := range cnr.Value.Attributes() {
+ ri.ContainerAttributes[key] = val
+ }
+
+ cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+ if hasNamespace {
+ ri.Namespace = cnrNamespace
+ }
+
+	// it is assumed that the key is valid at this point,
+	// otherwise the request would not have passed validation
+ ri.SenderKey = hex.EncodeToString(res.Key)
+
+ return ri, nil
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+ var sTok *sessionSDK.Object
+
+ if tokV2 != nil {
+ sTok = new(sessionSDK.Object)
+
+ err := sTok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+		// if the session relates to the object's removal, we don't check
+		// the tombstone's relation to the session here, since the user
+		// can't predict the tombstone's ID.
+ err = assertSessionRelation(*sTok, cnr, nil)
+ } else {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return sTok, nil
+}
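
A sketch of a call site for the new extractor, assuming the logger and the container, inner-ring and netmap sources are already constructed elsewhere (not runnable standalone; signatures taken from the code above):

extractor := NewRequestInfoExtractor(log, cnrSource, irFetcher, nmSource)

ri, err := extractor.GetRequestInfo(ctx, md, nativeschema.MethodGetObject)
if err != nil {
	return err // session-token and classification failures surface here
}
// ri.Role, ri.SenderKey, ri.ContainerOwner and ri.ContainerAttributes
// then feed directly into the checker's Prm.
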
diff --git a/pkg/services/object/acl/v2/request_test.go b/pkg/services/object/ape/metadata_test.go
similarity index 83%
rename from pkg/services/object/acl/v2/request_test.go
rename to pkg/services/object/ape/metadata_test.go
index 618af3469..fd919008f 100644
--- a/pkg/services/object/acl/v2/request_test.go
+++ b/pkg/services/object/ape/metadata_test.go
@@ -1,4 +1,4 @@
-package v2
+package ape
import (
"testing"
@@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) {
vh.SetBodySignature(&userSignature)
t.Run("empty verification header", func(t *testing.T) {
- req := MetaWithToken{}
+ req := Metadata{}
checkOwner(t, req, nil, errEmptyVerificationHeader)
})
t.Run("empty verification header signature", func(t *testing.T) {
- req := MetaWithToken{
- vheader: new(sessionV2.RequestVerificationHeader),
+ req := Metadata{
+ VerificationHeader: new(sessionV2.RequestVerificationHeader),
}
checkOwner(t, req, nil, errEmptyBodySig)
})
t.Run("no tokens", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
+ req := Metadata{
+ VerificationHeader: vh,
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer without impersonate, no session", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
- bearer: newBearer(t, containerOwner, userID, false),
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, false),
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer with impersonate, no session", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
- bearer: newBearer(t, containerOwner, userID, true),
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, true),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
- req := MetaWithToken{
- vheader: vh,
- bearer: newBearer(t, containerOwner, userID, true),
- token: newSession(t, pk),
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, true),
+ SessionToken: newSession(t, pk),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
t.Run("with session", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
- token: newSession(t, containerOwner),
+ req := Metadata{
+ VerificationHeader: vh,
+ SessionToken: newSession(t, containerOwner),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) {
var tok sessionSDK.Object
require.NoError(t, tok.ReadFromV2(tokV2))
- req := MetaWithToken{
- vheader: vh,
- token: &tok,
+ req := Metadata{
+ VerificationHeader: vh,
+ SessionToken: &tok,
}
checkOwner(t, req, nil, errInvalidSessionOwner)
})
@@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool
return &tok
}
-func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) {
+func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
_, actual, err := req.RequestOwner()
if expectedErr != nil {
require.ErrorIs(t, err, expectedErr)
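
The subtests above pin down the owner-resolution order. Stated compactly — a sketch of the branching in Metadata.RequestOwner, not new logic:

switch {
case md.BearerToken != nil && md.BearerToken.Impersonate():
	// 1. impersonation bearer token wins: owner = bearer signing key
case md.SessionToken != nil:
	// 2. session token: owner = token issuer, signature verified first
default:
	// 3. fallback: owner = key of the original body signature
}
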
diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go
index cb9bbf1b8..39dd7f476 100644
--- a/pkg/services/object/ape/request.go
+++ b/pkg/services/object/ape/request.go
@@ -57,11 +57,16 @@ func resourceName(cid cid.ID, oid *oid.ID, namespace string) string {
}
// objectProperties collects object properties from address parameters and a header if it is passed.
-func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string {
+func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string {
objectProps := map[string]string{
nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
}
+ for attrName, attrValue := range cnrAttrs {
+ prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName)
+ objectProps[prop] = attrValue
+ }
+
objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString()
if oid != nil {
@@ -140,7 +145,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
reqProps[xheadKey] = xhead.GetValue()
}
- reqProps, err = c.fillWithUserClaimTags(reqProps, prm)
+ reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm)
if err != nil {
return defaultRequest, err
}
@@ -155,7 +160,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
prm.Method,
aperequest.NewResource(
resourceName(prm.Container, prm.Object, prm.Namespace),
- objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
+ objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header),
),
reqProps,
), nil
@@ -177,7 +182,7 @@ func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, heade
return nil, fmt.Errorf("EC parent object ID format error: %w", err)
}
// only container node have access to collect parent object
- contNode, err := c.currentNodeIsContainerNode(prm.Container)
+ contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container)
if err != nil {
return nil, fmt.Errorf("check container node status: %w", err)
}
@@ -200,13 +205,13 @@ func isLogicalError(err error) bool {
return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound)
}
-func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
- cnr, err := c.cnrSource.Get(cnrID)
+func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) {
+ cnr, err := c.cnrSource.Get(ctx, cnrID)
if err != nil {
return false, err
}
- nm, err := netmap.GetLatestNetworkMap(c.nm)
+ nm, err := netmap.GetLatestNetworkMap(ctx, c.nm)
if err != nil {
return false, err
}
@@ -220,7 +225,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
return true, nil
}
- nm, err = netmap.GetPreviousNetworkMap(c.nm)
+ nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
if err != nil {
return false, err
}
@@ -229,7 +234,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) {
+func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
@@ -237,7 +242,7 @@ func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm)
if err != nil {
return nil, err
}
- props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
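
Each container attribute lands in the resource properties under a formatted key. The exact format string is nativeschema.PropertyKeyFormatObjectContainerAttribute from the policy-engine schema; the constant below is an illustrative stand-in, not the real value:

// illustrative only: the real format comes from the native schema package
const attrKeyFormat = "<container-attribute-prefix>/%s"

props := map[string]string{}
for name, value := range cnrAttrs {
	props[fmt.Sprintf(attrKeyFormat, name)] = value
}
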
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
index 787785b60..fcf7c4c40 100644
--- a/pkg/services/object/ape/request_test.go
+++ b/pkg/services/object/ape/request_test.go
@@ -7,6 +7,7 @@ import (
"testing"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -19,11 +20,20 @@ import (
)
const (
- testOwnerID = "FPPtmAi9TCX329"
+ testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y"
incomingIP = "192.92.33.1"
+
+ testSysAttrName = "unittest"
+
+ testSysAttrZone = "eggplant"
)
+var containerAttrs = map[string]string{
+ cnrV2.SysAttributeName: testSysAttrName,
+ cnrV2.SysAttributeZone: testSysAttrZone,
+}
+
func ctxWithPeerInfo() context.Context {
return peer.NewContext(context.Background(), &peer.Peer{
Addr: &net.TCPAddr{
@@ -105,7 +115,7 @@ func TestObjectProperties(t *testing.T) {
var testCnrOwner user.ID
require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
- props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader())
+ props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader())
require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID])
require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID])
@@ -124,6 +134,8 @@ func TestObjectProperties(t *testing.T) {
require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType])
require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash])
require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash])
+ require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)])
+ require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)])
for _, attr := range test.header.attributes {
require.Equal(t, attr.val, props[attr.key])
@@ -245,6 +257,10 @@ func TestNewAPERequest(t *testing.T) {
Role: role,
SenderKey: senderKey,
ContainerOwner: testCnrOwner,
+ ContainerAttributes: map[string]string{
+ cnrV2.SysAttributeZone: testSysAttrZone,
+ cnrV2.SysAttributeName: testSysAttrName,
+ },
}
headerSource := newHeaderProviderMock()
@@ -277,7 +293,7 @@ func TestNewAPERequest(t *testing.T) {
method,
aperequest.NewResource(
resourceName(cnr, obj, prm.Namespace),
- objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
+ objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header {
if headerObjSDK != nil {
return headerObjSDK.ToV2().GetHeader()
}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index c114f02f6..5e04843f3 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -2,9 +2,6 @@ package ape
import (
"context"
- "encoding/hex"
- "errors"
- "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@@ -12,19 +9,18 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)
-var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext")
-
type Service struct {
apeChecker Checker
+ extractor RequestInfoExtractor
+
next objectSvc.ServiceServer
}
@@ -64,9 +60,10 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
-func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service {
+func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
return &Service{
apeChecker: apeChecker,
+ extractor: extractor,
next: next,
}
}
@@ -76,17 +73,9 @@ type getStreamBasicChecker struct {
apeChecker Checker
- namespace string
+ metadata Metadata
- senderKey []byte
-
- containerOwner user.ID
-
- role string
-
- softAPECheck bool
-
- bearerToken *bearer.Token
+ reqInfo RequestInfo
}
func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
@@ -97,17 +86,17 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
}
prm := Prm{
- Namespace: g.namespace,
- Container: cnrID,
- Object: objID,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodGetObject,
- SenderKey: hex.EncodeToString(g.senderKey),
- ContainerOwner: g.containerOwner,
- Role: g.role,
- SoftAPECheck: g.softAPECheck,
- BearerToken: g.bearerToken,
- XHeaders: resp.GetMetaHeader().GetXHeaders(),
+ Namespace: g.reqInfo.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodGetObject,
+ SenderKey: g.reqInfo.SenderKey,
+ ContainerOwner: g.reqInfo.ContainerOwner,
+ ContainerAttributes: g.reqInfo.ContainerAttributes,
+ Role: g.reqInfo.Role,
+ BearerToken: g.metadata.BearerToken,
+ XHeaders: resp.GetMetaHeader().GetXHeaders(),
}
if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil {
@@ -117,66 +106,54 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
return g.GetObjectStream.Send(resp)
}
-func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) {
- untyped := ctx.Value(objectSvc.RequestContextKey)
- if untyped == nil {
- return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey)
- }
- rc, ok := untyped.(*objectSvc.RequestContext)
- if !ok {
- return nil, errFailedToCastToRequestContext
- }
- return rc, nil
-}
-
func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
- reqCtx, err := requestContext(stream.Context())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
+ if err != nil {
+ return err
}
-
return c.next.Get(request, &getStreamBasicChecker{
GetObjectStream: stream,
apeChecker: c.apeChecker,
- namespace: reqCtx.Namespace,
- senderKey: reqCtx.SenderKey,
- containerOwner: reqCtx.ContainerOwner,
- role: nativeSchemaRole(reqCtx.Role),
- softAPECheck: reqCtx.SoftAPECheck,
- bearerToken: reqCtx.BearerToken,
+ metadata: md,
+ reqInfo: reqInfo,
})
}
type putStreamBasicChecker struct {
apeChecker Checker
+ extractor RequestInfoExtractor
+
next objectSvc.PutObjectStream
}
func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- reqCtx, err := requestContext(ctx)
+ md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
}
-
- cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
+ reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
if err != nil {
- return toStatusErr(err)
+ return err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodPutObject,
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- Role: nativeSchemaRole(reqCtx.Role),
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ Role: reqInfo.Role,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -191,11 +168,12 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Put() (objectSvc.PutObjectStream, error) {
- streamer, err := c.next.Put()
+func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
+ streamer, err := c.next.Put(ctx)
return &putStreamBasicChecker{
apeChecker: c.apeChecker,
+ extractor: c.extractor,
next: streamer,
}, err
}
@@ -203,6 +181,8 @@ func (c *Service) Put() (objectSvc.PutObjectStream, error) {
type patchStreamBasicChecker struct {
apeChecker Checker
+ extractor RequestInfoExtractor
+
next objectSvc.PatchObjectStream
nonFirstSend bool
@@ -212,27 +192,26 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa
if !p.nonFirstSend {
p.nonFirstSend = true
- reqCtx, err := requestContext(ctx)
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
}
-
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
if err != nil {
- return toStatusErr(err)
+ return err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodPatchObject,
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- Role: nativeSchemaRole(reqCtx.Role),
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodPatchObject,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ Role: reqInfo.Role,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -247,22 +226,22 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Patch() (objectSvc.PatchObjectStream, error) {
- streamer, err := c.next.Patch()
+func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
+ streamer, err := c.next.Patch(ctx)
return &patchStreamBasicChecker{
apeChecker: c.apeChecker,
+ extractor: c.extractor,
next: streamer,
}, err
}
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
if err != nil {
return nil, err
}
@@ -276,7 +255,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
switch headerPart := resp.GetBody().GetHeaderPart().(type) {
case *objectV2.ShortHeader:
cidV2 := new(refs.ContainerID)
- cnrID.WriteToV2(cidV2)
+ md.Container.WriteToV2(cidV2)
header.SetContainerID(cidV2)
header.SetVersion(headerPart.GetVersion())
header.SetCreationEpoch(headerPart.GetCreationEpoch())
@@ -292,17 +271,17 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Header: header,
- Method: nativeschema.MethodHeadObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: header,
+ Method: nativeschema.MethodHeadObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -311,28 +290,25 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
- var cnrID cid.ID
- if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil {
- if err := cnrID.ReadFromV2(*cnrV2); err != nil {
- return toStatusErr(err)
- }
- }
-
- reqCtx, err := requestContext(stream.Context())
+ md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
if err != nil {
- return toStatusErr(err)
+ return err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
+ if err != nil {
+ return err
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Method: nativeschema.MethodSearchObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Method: nativeschema.MethodSearchObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -342,27 +318,26 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc
}
func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
if err != nil {
return nil, err
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodDeleteObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodDeleteObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -377,27 +352,26 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (
}
func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
}
-
- reqCtx, err := requestContext(stream.Context())
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
if err != nil {
- return toStatusErr(err)
+ return err
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodRangeObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodRangeObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -407,27 +381,26 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G
}
func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodHashObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodHashObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
resp, err := c.next.GetRangeHash(ctx, request)
@@ -442,28 +415,27 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa
}
func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Header: request.GetBody().GetObject().GetHeader(),
- Method: nativeschema.MethodPutObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: request.GetBody().GetObject().GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -473,18 +445,36 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ
return c.next.PutSingle(ctx, request)
}
-func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
- if cidV2 != nil {
- if err = cnrID.ReadFromV2(*cidV2); err != nil {
- return
- }
+type request interface {
+ GetMetaHeader() *session.RequestMetaHeader
+ GetVerificationHeader() *session.RequestVerificationHeader
+}
+
+func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
+ meta := request.GetMetaHeader()
+ for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
+ meta = origin
}
- if objV2 != nil {
- objID = new(oid.ID)
- if err = objID.ReadFromV2(*objV2); err != nil {
- return
- }
+ cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
+ if err != nil {
+ return
+ }
+ session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
+ if err != nil {
+ return
+ }
+ bearer, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return
+ }
+
+ md = Metadata{
+ Container: cnrID,
+ Object: objID,
+ VerificationHeader: request.GetVerificationHeader(),
+ SessionToken: session,
+ BearerToken: bearer,
}
return
}
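
newMetadata walks the meta-header chain down to the request as the client originally signed it, so session tokens are read from the innermost header. A self-contained sketch of that traversal:

package main

import "fmt"

type metaHeader struct {
	origin *metaHeader
	token  string
}

// original walks the wrapped meta headers down to the request as the
// client signed it, the same traversal newMetadata performs.
func original(m *metaHeader) *metaHeader {
	for m.origin != nil {
		m = m.origin
	}
	return m
}

func main() {
	leaf := &metaHeader{token: "client"}
	wrapped := &metaHeader{origin: &metaHeader{origin: leaf, token: "node1"}, token: "node2"}
	fmt.Println(original(wrapped).token) // client
}
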
diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go
index 46e55360d..97dbfa658 100644
--- a/pkg/services/object/ape/types.go
+++ b/pkg/services/object/ape/types.go
@@ -7,3 +7,11 @@ import "context"
type Checker interface {
CheckAPE(context.Context, Prm) error
}
+
+// InnerRingFetcher is an interface that must provide
+// Inner Ring information.
+type InnerRingFetcher interface {
+ // InnerRingKeys must return list of public keys of
+ // the actual inner ring.
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
+}
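
A test double for the relocated interface takes a few lines; a sketch with dummy key bytes (real implementations query the netmap contract):

package main

import (
	"context"
	"fmt"
)

type irStub struct{ keys [][]byte }

// InnerRingKeys satisfies the InnerRingFetcher interface with a
// fixed key list.
func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
	return s.keys, nil
}

func main() {
	stub := &irStub{keys: [][]byte{{0x02, 0x01}}}
	keys, _ := stub.InnerRingKeys(context.Background())
	fmt.Println(len(keys)) // 1
}
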
diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/ape/util.go
similarity index 58%
rename from pkg/services/object/acl/v2/util.go
rename to pkg/services/object/ape/util.go
index e02f70771..5cd2caa50 100644
--- a/pkg/services/object/acl/v2/util.go
+++ b/pkg/services/object/ape/util.go
@@ -1,4 +1,4 @@
-package v2
+package ape
import (
"crypto/ecdsa"
@@ -6,57 +6,34 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-var errMissingContainerID = errors.New("missing container ID")
-
-func getContainerIDFromRequest(req any) (cid.ID, error) {
- var idV2 *refsV2.ContainerID
- var id cid.ID
-
- switch v := req.(type) {
- case *objectV2.GetRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.PutRequest:
- part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
- if !ok {
- return cid.ID{}, errors.New("can't get container ID in chunk")
+func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
+ if cidV2 != nil {
+ if err = cnrID.ReadFromV2(*cidV2); err != nil {
+ return
}
-
- idV2 = part.GetHeader().GetContainerID()
- case *objectV2.HeadRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.SearchRequest:
- idV2 = v.GetBody().GetContainerID()
- case *objectV2.DeleteRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.GetRangeRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.GetRangeHashRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.PutSingleRequest:
- idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
- case *objectV2.PatchRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- default:
- return cid.ID{}, errors.New("unknown request type")
+ } else {
+ err = errMissingContainerID
+ return
}
- if idV2 == nil {
- return cid.ID{}, errMissingContainerID
+ if objV2 != nil {
+ objID = new(oid.ID)
+ if err = objID.ReadFromV2(*objV2); err != nil {
+ return
+ }
}
-
- return id, id.ReadFromV2(*idV2)
+ return
}
// originalBearerToken goes down to original request meta header and fetches
@@ -75,50 +52,6 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
return &tok, tok.ReadFromV2(*tokV2)
}
-// originalSessionToken goes down to original request meta header and fetches
-// session token from there.
-func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
- for header.GetOrigin() != nil {
- header = header.GetOrigin()
- }
-
- tokV2 := header.GetSessionToken()
- if tokV2 == nil {
- return nil, nil
- }
-
- var tok sessionSDK.Object
-
- err := tok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- return &tok, nil
-}
-
-// getObjectIDFromRequestBody decodes oid.ID from the common interface of the
-// object reference's holders. Returns an error if object ID is missing in the request.
-func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
- idV2 := body.GetAddress().GetObjectID()
- return getObjectIDFromRefObjectID(idV2)
-}
-
-func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) {
- if idV2 == nil {
- return nil, errors.New("missing object ID")
- }
-
- var id oid.ID
-
- err := id.ReadFromV2(*idV2)
- if err != nil {
- return nil, err
- }
-
- return &id, nil
-}
-
func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
// 1. First check signature of session token.
if !token.VerifySignature() {
@@ -172,16 +105,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-// assertVerb checks that token verb corresponds to op.
-func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
- switch op {
- case acl.OpObjectPut:
+// assertVerb checks that token verb corresponds to the method.
+func assertVerb(tok sessionSDK.Object, method string) bool {
+ switch method {
+ case nativeschema.MethodPutObject:
return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
- case acl.OpObjectDelete:
+ case nativeschema.MethodDeleteObject:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
- case acl.OpObjectGet:
+ case nativeschema.MethodGetObject:
return tok.AssertVerb(sessionSDK.VerbObjectGet)
- case acl.OpObjectHead:
+ case nativeschema.MethodHeadObject:
return tok.AssertVerb(
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
@@ -190,14 +123,15 @@ func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectPatch,
)
- case acl.OpObjectSearch:
+ case nativeschema.MethodSearchObject:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
- case acl.OpObjectRange:
+ case nativeschema.MethodRangeObject:
return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
- case acl.OpObjectHash:
+ case nativeschema.MethodHashObject:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
+ case nativeschema.MethodPatchObject:
+ return tok.AssertVerb(sessionSDK.VerbObjectPatch)
}
-
return false
}
@@ -221,3 +155,15 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error
return nil
}
+
+func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
+ key, err := unmarshalPublicKey(rawKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var idSender user.ID
+ user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
+
+ return &idSender, key, nil
+}
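
A short usage sketch of the method-keyed assertVerb; the expectations follow directly from the switch above:

var tok sessionSDK.Object
tok.ForVerb(sessionSDK.VerbObjectPatch)

_ = assertVerb(tok, nativeschema.MethodPutObject)  // true: PUT accepts Patch
_ = assertVerb(tok, nativeschema.MethodHashObject) // false: hash needs RangeHash
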
diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go
new file mode 100644
index 000000000..916bce427
--- /dev/null
+++ b/pkg/services/object/ape/util_test.go
@@ -0,0 +1,84 @@
+package ape
+
+import (
+ "slices"
+ "testing"
+
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsVerbCompatible(t *testing.T) {
+ table := map[string][]sessionSDK.ObjectVerb{
+ nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
+ nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
+ nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet},
+ nativeschema.MethodHeadObject: {
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectPatch,
+ },
+ nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
+ nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash},
+ nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
+ nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch},
+ }
+
+ verbs := []sessionSDK.ObjectVerb{
+ sessionSDK.VerbObjectPut,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectSearch,
+ sessionSDK.VerbObjectPatch,
+ }
+
+ var tok sessionSDK.Object
+
+ for op, list := range table {
+ for _, verb := range verbs {
+ contains := slices.Contains(list, verb)
+
+ tok.ForVerb(verb)
+
+ require.Equal(t, contains, assertVerb(tok, op),
+ "%v in token, %s executing", verb, op)
+ }
+ }
+}
+
+func TestAssertSessionRelation(t *testing.T) {
+ var tok sessionSDK.Object
+ cnr := cidtest.ID()
+ cnrOther := cidtest.ID()
+ obj := oidtest.ID()
+ objOther := oidtest.ID()
+
+ // make sure ids differ, otherwise test won't work correctly
+ require.False(t, cnrOther.Equals(cnr))
+ require.False(t, objOther.Equals(obj))
+
+ // bind session to the container (required)
+ tok.BindContainer(cnr)
+
+ // test container-global session
+ require.NoError(t, assertSessionRelation(tok, cnr, nil))
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnrOther, nil))
+ require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
+
+ // limit the session to the particular object
+ tok.LimitByObjects(obj)
+
+ // test fixed object session (here obj arg must be non-nil everywhere)
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnr, &objOther))
+}
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index b42084634..f8ee089fe 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -37,7 +37,7 @@ func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return res, err
}
@@ -48,7 +48,7 @@ func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error
if !a.enabled.Load() {
return err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -59,7 +59,7 @@ func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRan
if !a.enabled.Load() {
return err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -70,7 +70,7 @@ func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHas
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
@@ -81,19 +81,19 @@ func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*obje
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
// Put implements ServiceServer.
-func (a *auditService) Put() (PutObjectStream, error) {
- res, err := a.next.Put()
+func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) {
+ res, err := a.next.Put(ctx)
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
return res, err
}
return &auditPutStream{
@@ -108,7 +108,7 @@ func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleReque
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(),
req.GetBody().GetObject().GetObjectID()),
err == nil)
@@ -121,7 +121,7 @@ func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) er
if !a.enabled.Load() {
return err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return err
}
@@ -145,7 +145,7 @@ func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse,
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -163,8 +163,8 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error
if err != nil {
a.failed = true
}
- if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
@@ -183,13 +183,13 @@ type auditPatchStream struct {
nonFirstSend bool
}
-func (a *auditService) Patch() (PatchObjectStream, error) {
- res, err := a.next.Patch()
+func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ res, err := a.next.Patch(ctx)
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
return res, err
}
return &auditPatchStream{
@@ -205,7 +205,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -224,8 +224,8 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e
if err != nil {
a.failed = true
}
- if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
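Every audit call now threads a context through: unary handlers pass their request context, streaming handlers take stream.Context(), and the Put/Patch stream wrappers reuse the context given to Send or CloseAndRecv. A reduced sketch of the receiving side, assuming the audit package simply hands the context to the logger so trace identifiers end up on the audit record:

    // Sketch, not the actual audit package: the context is consumed
    // only by the logger, which can extract a trace ID from it.
    func LogRequest(ctx context.Context, log *logger.Logger, op string, ok bool) {
        log.Info(ctx, "audit event",
            zap.String("operation", op),
            zap.Bool("success", ok))
    }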
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index 758156607..ef65e78bc 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -40,20 +40,20 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
return x.nextHandler.Get(req, stream)
}
-func (x *Common) Put() (PutObjectStream, error) {
+func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Put()
+ return x.nextHandler.Put(ctx)
}
-func (x *Common) Patch() (PatchObjectStream, error) {
+func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Patch()
+ return x.nextHandler.Patch(ctx)
}
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
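Put and Patch now accept the caller's context and forward it down the handler chain; the maintenance guard itself is unchanged. Call sites change mechanically (a sketch):

    stream, err := svc.Put(ctx) // ctx now reaches every next handler
    if err != nil {
        var maintenance *apistatus.NodeUnderMaintenance
        if errors.As(err, &maintenance) {
            // the node refuses new write streams while under maintenance
        }
        return err
    }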
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index 9e0f49297..f2bd907db 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -1,6 +1,7 @@
package target
import (
+ "context"
"errors"
"fmt"
@@ -13,20 +14,20 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
// prepare needed put parameters
- if err := preparePrm(&prm); err != nil {
+ if err := preparePrm(ctx, &prm); err != nil {
return nil, fmt.Errorf("could not prepare put parameters: %w", err)
}
if prm.Header.Signature() != nil {
- return newUntrustedTarget(&prm)
+ return newUntrustedTarget(ctx, &prm)
}
- return newTrustedTarget(&prm)
+ return newTrustedTarget(ctx, &prm)
}
-func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -48,9 +49,9 @@ func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWrit
}, nil
}
-func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
prm.Relay = nil // do not relay request without signature
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -88,10 +89,8 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter
if !ownerObj.Equals(ownerSession) {
return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
}
- } else {
- if !ownerObj.Equals(sessionInfo.Owner) {
- return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
- }
+ } else if !ownerObj.Equals(sessionInfo.Owner) {
+ return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
}
if prm.SignRequestPrivateKey == nil {
@@ -111,11 +110,11 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter
}, nil
}
-func preparePrm(prm *objectwriter.Params) error {
+func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
var err error
// get latest network map
- nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
+ nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource)
if err != nil {
return fmt.Errorf("could not get latest network map: %w", err)
}
@@ -126,7 +125,7 @@ func preparePrm(prm *objectwriter.Params) error {
}
// get container to store the object
- cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
+ cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr)
if err != nil {
return fmt.Errorf("could not get container by ID: %w", err)
}
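Both lookups in preparePrm — the latest network map and the container record — are now bounded by the request context, so a cancelled client call aborts them instead of letting them run to completion. Callers of target.New gain one argument:

    // Before: target.New(prm). After: the caller's context bounds the
    // netmap and container reads performed while preparing parameters.
    writer, err := target.New(ctx, prm)
    if err != nil {
        return fmt.Errorf("create chunked object writer: %w", err)
    }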
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
index 6689557ee..6593d3ca0 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
}
func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
- traverser, err := placement.NewTraverser(n.Traversal.Opts...)
+ traverser, err := placement.NewTraverser(ctx, n.Opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
@@ -56,10 +56,10 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
}
// perform additional container broadcast if needed
- if n.Traversal.submitPrimaryPlacementFinish() {
+ if n.submitPrimaryPlacementFinish() {
err := n.ForEachNode(ctx, f)
if err != nil {
- n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -79,33 +79,29 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
continue
}
- workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
+ isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey())
item := new(bool)
wg.Add(1)
- if err := workerPool.Submit(func() {
+ go func() {
defer wg.Done()
err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
if err != nil {
resErr.Store(err)
- svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err)
+ svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
- }); err != nil {
- wg.Done()
- svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err)
- return true
- }
+ }()
// Mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
- n.Traversal.submitProcessed(addr, item)
+ n.submitProcessed(addr, item)
}
wg.Wait()
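The bounded worker pool is gone from the placement fan-out: each candidate node gets its own goroutine and the existing WaitGroup is the join point, which also removes the Submit-failure branch. Concurrency is now limited only by the size of the placement vector. The bare pattern, reduced from the loop above:

    var wg sync.WaitGroup
    for _, addr := range addrs {
        wg.Add(1)
        go func(addr placement.Node) { // one goroutine per candidate node
            defer wg.Done()
            if err := f(ctx, addr); err != nil {
                resErr.Store(err) // keep the last failure, as above
                return
            }
            traverser.SubmitSuccess()
        }(addr)
    }
    wg.Wait() // all placement attempts have finished here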
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
index f7486eae7..fff58aca7 100644
--- a/pkg/services/object/common/writer/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -95,6 +95,10 @@ func (x errIncompletePut) Error() string {
return commonMsg
}
+func (x errIncompletePut) Unwrap() error {
+ return x.singleErr
+}
+
// WriteObject implements the transformer.ObjectWriter interface.
func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
t.obj = obj
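errIncompletePut previously swallowed its cause: Error() mentioned it, but errors.Is and errors.As could not see through. With Unwrap, a caller can match the underlying status error, e.g.:

    err := w.WriteObject(ctx, obj)
    var removed *apistatus.ObjectAlreadyRemoved
    if errors.As(err, &removed) {
        // reachable now: errIncompletePut unwraps to the per-node error
    }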
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 571bae7bb..26a53e315 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -14,6 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -84,7 +85,7 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
}
func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) {
- currentNodeIsContainerNode, err := e.currentNodeIsContainerNode()
+ currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx)
if err != nil {
return false, false, err
}
@@ -107,8 +108,8 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O
return true, currentNodeIsContainerNode, nil
}
-func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
- t, err := placement.NewTraverser(e.PlacementOpts...)
+func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) {
+ t, err := placement.NewTraverser(ctx, e.PlacementOpts...)
if err != nil {
return false, err
}
@@ -127,7 +128,7 @@ func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
}
func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
- t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -148,21 +149,11 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
- completed := make(chan interface{})
- if poolErr := e.Config.RemotePool.Submit(func() {
- defer close(completed)
- err = e.Relay(ctx, info, c)
- }); poolErr != nil {
- close(completed)
- svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr)
- return poolErr
- }
- <-completed
-
+ err = e.Relay(ctx, info, c)
if err == nil {
return nil
}
- e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
lastErr = err
}
}
@@ -179,7 +170,7 @@ func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error
return e.writePartLocal(ctx, obj)
}
- t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
if err != nil {
return err
}
@@ -216,7 +207,7 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
partsProcessed := make([]atomic.Bool, len(parts))
objID, _ := obj.ID()
- t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -274,8 +265,10 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
err := e.putECPartToNode(ctx, obj, node)
if err == nil {
return nil
+ } else if clientSDK.IsErrObjectAlreadyRemoved(err) {
+ return err
}
- e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
@@ -299,7 +292,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -323,7 +316,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -340,21 +333,11 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n
}
func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
- var err error
localTarget := LocalTarget{
Storage: e.Config.LocalStore,
Container: e.Container,
}
- completed := make(chan interface{})
- if poolErr := e.Config.LocalPool.Submit(func() {
- defer close(completed)
- err = localTarget.WriteObject(ctx, obj, e.ObjectMeta)
- }); poolErr != nil {
- close(completed)
- return poolErr
- }
- <-completed
- return err
+ return localTarget.WriteObject(ctx, obj, e.ObjectMeta)
}
func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
@@ -368,15 +351,5 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n
nodeInfo: clientNodeInfo,
}
- var err error
- completed := make(chan interface{})
- if poolErr := e.Config.RemotePool.Submit(func() {
- defer close(completed)
- err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
- }); poolErr != nil {
- close(completed)
- return poolErr
- }
- <-completed
- return err
+ return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
}
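Two simplifications land in the EC writer: pool submissions that only emulated a direct call (a Submit plus a completion channel to wait on) become plain synchronous calls, and writePart stops retrying once a node reports the object as already removed — a tombstoned part cannot succeed anywhere else. The retry guard in isolation:

    if err := e.putECPartToNode(ctx, obj, node); err != nil {
        if clientSDK.IsErrObjectAlreadyRemoved(err) {
            return err // definitive: skip the remaining candidate nodes
        }
        // any other error: log and fall through to the next candidate
    }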
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
index 8b2599e5f..d5eeddf21 100644
--- a/pkg/services/object/common/writer/ec_test.go
+++ b/pkg/services/object/common/writer/ec_test.go
@@ -7,6 +7,7 @@ import (
"crypto/sha256"
"errors"
"fmt"
+ "slices"
"strconv"
"testing"
@@ -30,7 +31,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
)
@@ -38,11 +38,10 @@ type testPlacementBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
[][]netmap.NodeInfo, error,
) {
- arr := make([]netmap.NodeInfo, len(p.vectors[0]))
- copy(arr, p.vectors[0])
+ arr := slices.Clone(p.vectors[0])
return [][]netmap.NodeInfo{arr}, nil
}
@@ -131,17 +130,13 @@ func TestECWriter(t *testing.T) {
nodeKey, err := keys.NewPrivateKey()
require.NoError(t, err)
- pool, err := ants.NewPool(4, ants.WithNonblocking(true))
- require.NoError(t, err)
-
- log, err := logger.NewLogger(nil)
+ log, err := logger.NewLogger(logger.Prm{})
require.NoError(t, err)
var n nmKeys
ecw := ECWriter{
Config: &Config{
NetmapKeys: n,
- RemotePool: pool,
Logger: log,
ClientConstructor: clientConstructor{vectors: ns},
KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
index 0e4c4d9c6..d3d2b41b4 100644
--- a/pkg/services/object/common/writer/writer.go
+++ b/pkg/services/object/common/writer/writer.go
@@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -24,7 +23,7 @@ type MaxSizeSource interface {
// of physically stored object in system.
//
// Must return 0 if value can not be obtained.
- MaxObjectSize() uint64
+ MaxObjectSize(context.Context) uint64
}
type ClientConstructor interface {
@@ -32,7 +31,7 @@ type ClientConstructor interface {
}
type InnerRing interface {
- InnerRingKeys() ([][]byte, error)
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
}
type FormatValidatorConfig interface {
@@ -52,8 +51,6 @@ type Config struct {
NetmapSource netmap.Source
- RemotePool, LocalPool util.WorkerPool
-
NetmapKeys netmap.AnnouncedKeys
FormatValidator *object.FormatValidator
@@ -69,12 +66,6 @@ type Config struct {
type Option func(*Config)
-func WithWorkerPools(remote, local util.WorkerPool) Option {
- return func(c *Config) {
- c.RemotePool, c.LocalPool = remote, local
- }
-}
-
func WithLogger(l *logger.Logger) Option {
return func(c *Config) {
c.Logger = l
@@ -87,13 +78,6 @@ func WithVerifySessionTokenIssuer(v bool) Option {
}
}
-func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) {
- if c.NetmapKeys.IsLocalKey(pub) {
- return c.LocalPool, true
- }
- return c.RemotePool, false
-}
-
type Params struct {
Config *Config
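The MaxSizeSource and InnerRing interfaces gain a context parameter, which every test double has to mirror. A minimal conforming stub (an illustration, not taken from this patch):

    type staticMaxSize uint64

    // MaxObjectSize implements MaxSizeSource; the context is accepted
    // for interface compatibility and deliberately unused here.
    func (s staticMaxSize) MaxObjectSize(context.Context) uint64 { return uint64(s) }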
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 88454625d..57e33fde7 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(logs.ServingRequest)
+ exec.log.Debug(ctx, logs.ServingRequest)
if err := exec.executeLocal(ctx); err != nil {
- exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
return err
}
- exec.log.Debug(logs.OperationFinishedSuccessfully)
+ exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
return nil
}
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index ec771320e..a99ba3586 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -34,13 +35,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = &logger.Logger{Logger: l.With(
+ exec.log = l.With(
zap.String("request", "DELETE"),
zap.Stringer("address", exec.address()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )}
+ )
}
func (exec *execCtx) isLocal() bool {
@@ -83,16 +84,16 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
exec.splitInfo = errSplitInfo.SplitInfo()
exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
- exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
+ exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
if err := exec.collectMembers(ctx); err != nil {
return err
}
- exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
+ exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected)
return nil
case errors.As(err, &errECInfo):
- exec.log.Debug(logs.DeleteECObjectReceived)
+ exec.log.Debug(ctx, logs.DeleteECObjectReceived)
return nil
}
@@ -108,7 +109,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
func (exec *execCtx) collectMembers(ctx context.Context) error {
if exec.splitInfo == nil {
- exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
+ exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY)
return nil
}
@@ -131,7 +132,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error {
func (exec *execCtx) collectChain(ctx context.Context) error {
var chain []oid.ID
- exec.log.Debug(logs.DeleteAssemblingChain)
+ exec.log.Debug(ctx, logs.DeleteAssemblingChain)
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
@@ -152,7 +153,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error {
}
func (exec *execCtx) collectChildren(ctx context.Context) error {
- exec.log.Debug(logs.DeleteCollectingChildren)
+ exec.log.Debug(ctx, logs.DeleteCollectingChildren)
children, err := exec.svc.header.children(ctx, exec)
if err != nil {
@@ -165,7 +166,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error {
}
func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
- exec.log.Debug(logs.DeleteSupplementBySplitID)
+ exec.log.Debug(ctx, logs.DeleteSupplementBySplitID)
chain, err := exec.svc.searcher.splitMembers(ctx, exec)
if err != nil {
@@ -182,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
for i := range members {
for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
if members[i].Equals(incoming[j]) {
- incoming = append(incoming[:j], incoming[j+1:]...)
+ incoming = slices.Delete(incoming, j, j+1)
j--
}
}
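slices.Delete(incoming, j, j+1) is the stdlib spelling of the append splice it replaces: it shifts the tail left in place and returns the shortened slice, so the j-- that follows re-checks the element that slid into position j. For instance:

    s := []oid.ID{a, b, c}
    s = slices.Delete(s, 1, 2) // removes s[1]; s is now [a c], len 2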
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 2c3c47f49..01b2d9b3f 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -10,13 +10,13 @@ import (
)
func (exec *execCtx) executeLocal(ctx context.Context) error {
- exec.log.Debug(logs.DeleteFormingTombstoneStructure)
+ exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure)
if err := exec.formTombstone(ctx); err != nil {
return err
}
- exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
+ exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
return exec.saveTombstone(ctx)
}
@@ -33,7 +33,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) error {
)
exec.addMembers([]oid.ID{exec.address().Object()})
- exec.log.Debug(logs.DeleteFormingSplitInfo)
+ exec.log.Debug(ctx, logs.DeleteFormingSplitInfo)
if err := exec.formExtendedInfo(ctx); err != nil {
return fmt.Errorf("form extended info: %w", err)
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index e4f7a8c50..1c4d7d585 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -72,7 +72,7 @@ func New(gs *getsvc.Service,
opts ...Option,
) *Service {
c := &cfg{
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
header: &headSvcWrapper{s: gs},
searcher: &searchSvcWrapper{s: ss},
placer: &putSvcWrapper{s: ps},
@@ -92,6 +92,6 @@ func New(gs *getsvc.Service,
// WithLogger returns option to specify Delete service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))}
+ c.log = l
}
}
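WithLogger now stores the logger as given instead of stamping a component field onto it; a caller that still wants the field attaches it up front (a sketch, with the service's other dependencies elided):

    svc := deletesvc.New( /* get, search, put services, ... */
        deletesvc.WithLogger(l.With(zap.String("component", "Object.Delete"))),
    )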
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index 9f17f1e4c..e80132489 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -13,7 +13,7 @@ import (
func (r *request) assemble(ctx context.Context) {
if !r.canAssembleComplexObject() {
- r.log.Debug(logs.GetCanNotAssembleTheObject)
+ r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
return
}
@@ -35,23 +35,23 @@ func (r *request) assemble(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(logs.GetTryingToAssembleTheObject)
+ r.log.Debug(ctx, logs.GetTryingToAssembleTheObject)
r.prm.common = r.prm.common.WithLocalOnly(false)
assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
- r.log.Debug(logs.GetAssemblingSplittedObject,
+ r.log.Debug(ctx, logs.GetAssemblingSplittedObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted,
+ defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil {
- r.log.Warn(logs.GetFailedToAssembleSplittedObject,
+ r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
@@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
detachedExecutor.execute(ctx)
- return detachedExecutor.statusError.err
+ return detachedExecutor.err
}
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index 03f913bbf..59dd7fd93 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -12,7 +12,7 @@ import (
func (r *request) assembleEC(ctx context.Context) {
if r.isRaw() {
- r.log.Debug(logs.GetCanNotAssembleTheObject)
+ r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
return
}
@@ -34,10 +34,10 @@ func (r *request) assembleEC(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(logs.GetTryingToAssembleTheECObject)
+ r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
// initialize epoch number
- ok := r.initEpoch()
+ ok := r.initEpoch(ctx)
if !ok {
return
}
@@ -45,18 +45,18 @@ func (r *request) assembleEC(ctx context.Context) {
r.prm.common = r.prm.common.WithLocalOnly(false)
assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
- r.log.Debug(logs.GetAssemblingECObject,
+ r.log.Debug(ctx, logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(logs.GetAssemblingECObjectCompleted,
+ defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) {
- r.log.Warn(logs.GetFailedToAssembleECObject,
+ r.log.Warn(ctx, logs.GetFailedToAssembleECObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go
index ff3f90bf2..b24c9417b 100644
--- a/pkg/services/object/get/assembler.go
+++ b/pkg/services/object/get/assembler.go
@@ -2,6 +2,7 @@ package getsvc
import (
"context"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -59,53 +60,24 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS
if previousID == nil && len(childrenIDs) == 0 {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
}
+
if len(childrenIDs) > 0 {
- if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil {
- return nil, err
+ if a.rng != nil {
+ err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer)
+ } else {
+ err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer)
}
} else {
- if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil {
- return nil, err
+ if a.rng != nil {
+ err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer)
+ } else {
+ err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer)
}
}
- return a.parentObject, nil
-}
-
-func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- var sourceObjectIDs []oid.ID
- sourceObjectID, ok := a.splitInfo.Link()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- sourceObjectID, ok = a.splitInfo.LastPart()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- if len(sourceObjectIDs) == 0 {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- for _, sourceObjectID = range sourceObjectIDs {
- obj, err := a.getParent(ctx, sourceObjectID, writer)
- if err == nil {
- return obj, nil
- }
- }
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
-}
-
-func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
if err != nil {
return nil, err
}
- parent := obj.Parent()
- if parent == nil {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- if err := writer.WriteHeader(ctx, parent); err != nil {
- return nil, err
- }
- return obj, nil
+ return a.parentObject, nil
}
func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) {
@@ -190,26 +162,16 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD
}
func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if a.rng == nil {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
- }
- return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true)
- }
-
- if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
return err
}
- return writer.WriteChunk(ctx, a.parentObject.Payload())
+ return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true)
}
func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if a.rng == nil {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
- }
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
}
-
if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil {
return err
}
@@ -219,16 +181,9 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev
return nil
}
-func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error {
- withRng := len(partRanges) > 0 && a.rng != nil
-
+func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error {
for i := range partIDs {
- var r *objectSDK.Range
- if withRng {
- r = &partRanges[i]
- }
-
- _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer)
+ _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer)
if err != nil {
return err
}
@@ -237,22 +192,13 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec
}
func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, rngs, err := a.buildChain(ctx, prevID)
+ chain, err := a.buildChain(ctx, prevID)
if err != nil {
return err
}
- reverseRngs := len(rngs) > 0
-
- for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 {
- chain[left], chain[right] = chain[right], chain[left]
-
- if reverseRngs {
- rngs[left], rngs[right] = rngs[right], rngs[left]
- }
- }
-
- return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false)
+ slices.Reverse(chain)
+ return a.assemblePayloadByObjectIDs(ctx, writer, chain, false)
}
func (a *assembler) isChild(obj *objectSDK.Object) bool {
@@ -260,63 +206,28 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool {
return parent == nil || equalAddresses(a.addr, object.AddressOf(parent))
}
-func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
+func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) {
var (
chain []oid.ID
- rngs []objectSDK.Range
- from = a.rng.GetOffset()
- to = from + a.rng.GetLength()
hasPrev = true
)
// fill the chain end-to-start
for hasPrev {
- // check that only for "range" requests,
- // for `GET` it stops via the false `withPrev`
- if a.rng != nil && a.currentOffset <= from {
- break
- }
-
head, err := a.objGetter.HeadObject(ctx, prevID)
if err != nil {
- return nil, nil, err
+ return nil, err
}
if !a.isChild(head) {
- return nil, nil, errParentAddressDiffers
+ return nil, errParentAddressDiffers
}
- if a.rng != nil {
- sz := head.PayloadSize()
-
- a.currentOffset -= sz
-
- if a.currentOffset < to {
- off := uint64(0)
- if from > a.currentOffset {
- off = from - a.currentOffset
- sz -= from - a.currentOffset
- }
-
- if to < a.currentOffset+off+sz {
- sz = to - off - a.currentOffset
- }
-
- index := len(rngs)
- rngs = append(rngs, objectSDK.Range{})
- rngs[index].SetOffset(off)
- rngs[index].SetLength(sz)
-
- id, _ := head.ID()
- chain = append(chain, id)
- }
- } else {
- id, _ := head.ID()
- chain = append(chain, id)
- }
+ id, _ := head.ID()
+ chain = append(chain, id)
prevID, hasPrev = head.PreviousID()
}
- return chain, rngs, nil
+ return chain, nil
}
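Assemble now branches on a.rng exactly once, sending range requests to the *Range variants (moved into assembler_range.go below) while the full-GET path drops all per-part range bookkeeping; buildChain shrinks to a plain walk over PreviousID links. slices.Reverse also replaces the manual two-index swap:

    chain := []oid.ID{last, middle, first} // collected end-to-start
    slices.Reverse(chain)                  // first, middle, last: payload order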
diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go
new file mode 100644
index 000000000..ff213cb82
--- /dev/null
+++ b/pkg/services/object/get/assembler_head.go
@@ -0,0 +1,45 @@
+package getsvc
+
+import (
+ "context"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ var sourceObjectIDs []oid.ID
+ sourceObjectID, ok := a.splitInfo.Link()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ sourceObjectID, ok = a.splitInfo.LastPart()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ if len(sourceObjectIDs) == 0 {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ for _, sourceObjectID = range sourceObjectIDs {
+ obj, err := a.getParent(ctx, sourceObjectID, writer)
+ if err == nil {
+ return obj, nil
+ }
+ }
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+}
+
+func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
+ if err != nil {
+ return nil, err
+ }
+ parent := obj.Parent()
+ if parent == nil {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ if err := writer.WriteHeader(ctx, parent); err != nil {
+ return nil, err
+ }
+ return obj, nil
+}
diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go
new file mode 100644
index 000000000..780693c40
--- /dev/null
+++ b/pkg/services/object/get/assembler_range.go
@@ -0,0 +1,87 @@
+package getsvc
+
+import (
+ "context"
+ "slices"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
+ if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
+ return err
+ }
+ return writer.WriteChunk(ctx, a.parentObject.Payload())
+}
+
+func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
+ if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil {
+ return err
+ }
+ if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part
+ return err
+ }
+ return nil
+}
+
+func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error {
+ for i := range partIDs {
+ _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
+ chain, rngs, err := a.buildChainRange(ctx, prevID)
+ if err != nil {
+ return err
+ }
+
+ slices.Reverse(chain)
+ slices.Reverse(rngs)
+ return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs)
+}
+
+func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
+ var (
+ chain []oid.ID
+ rngs []objectSDK.Range
+ from = a.rng.GetOffset()
+ to = from + a.rng.GetLength()
+
+ hasPrev = true
+ )
+
+ // fill the chain end-to-start
+ for hasPrev && from < a.currentOffset {
+ head, err := a.objGetter.HeadObject(ctx, prevID)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !a.isChild(head) {
+ return nil, nil, errParentAddressDiffers
+ }
+
+ nextOffset := a.currentOffset - head.PayloadSize()
+ clampedFrom := max(from, nextOffset)
+ clampedTo := min(to, a.currentOffset)
+ if clampedFrom < clampedTo {
+ index := len(rngs)
+ rngs = append(rngs, objectSDK.Range{})
+ rngs[index].SetOffset(clampedFrom - nextOffset)
+ rngs[index].SetLength(clampedTo - clampedFrom)
+
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
+
+ a.currentOffset = nextOffset
+ prevID, hasPrev = head.PreviousID()
+ }
+
+ return chain, rngs, nil
+}
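The clamping in buildChainRange intersects the requested window [from, to) with the part occupying [nextOffset, currentOffset): the overlap is [max(from, nextOffset), min(to, currentOffset)), and the stored Range is that overlap rebased to the part's start. A worked example, assuming three 100-byte parts (total size 300, so the walk starts with currentOffset = 300) and a request for offset 150, length 100:

    // request window: [150, 250)
    // part C [200, 300): overlap [200, 250) -> Range{Offset: 0, Length: 50}
    // part B [100, 200): overlap [150, 200) -> Range{Offset: 50, Length: 50}
    // part A [0, 100):   never headed: the loop stops because
    //                    from (150) is no longer < currentOffset (100)
    // chain = [C, B]; slices.Reverse yields [B, C], the payload order.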
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index 44d9af3a2..e0a7e1da6 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -125,7 +125,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter
func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) {
objID := a.addr.Object()
- trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch)
+ trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch)
if err != nil {
return nil, err
}
@@ -155,7 +155,7 @@ func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Travers
parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount)
if err != nil {
- a.log.Debug(logs.GetUnableToGetAllPartsECObject, zap.Error(err))
+ a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err))
}
return parts
}
@@ -229,7 +229,7 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -238,15 +238,13 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var object *objectSDK.Object
if a.head {
object, err = a.localStorage.Head(ctx, addr, false)
- if err != nil {
- a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
}
} else {
object, err = a.localStorage.Get(ctx, addr)
- if err != nil {
- a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
}
}
return object
@@ -259,11 +257,11 @@ func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.N
var errECInfo *objectSDK.ECInfoError
_, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true)
if err == nil {
- a.log.Error(logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
+ a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
return nil
}
if !errors.As(err, &errECInfo) {
- a.log.Warn(logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
+ a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
return nil
}
result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks))
@@ -277,7 +275,7 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -286,15 +284,13 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var object *objectSDK.Object
if a.head {
object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false)
- if err != nil {
- a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
}
} else {
object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node)
- if err != nil {
- a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
}
}
return object
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index 034768c81..dfb31133c 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -10,34 +10,25 @@ import (
func (r *request) executeOnContainer(ctx context.Context) {
if r.isLocal() {
- r.log.Debug(logs.GetReturnResultDirectly)
+ r.log.Debug(ctx, logs.GetReturnResultDirectly)
return
}
lookupDepth := r.netmapLookupDepth()
- r.log.Debug(logs.TryingToExecuteInContainer,
+ r.log.Debug(ctx, logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- ok := r.initEpoch()
+ ok := r.initEpoch(ctx)
if !ok {
return
}
localStatus := r.status
- for {
- if r.processCurrentEpoch(ctx, localStatus) {
- break
- }
-
- // check the maximum depth has been reached
- if lookupDepth == 0 {
- break
- }
-
+ for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 {
lookupDepth--
// go to the previous epoch
@@ -46,11 +37,11 @@ func (r *request) executeOnContainer(ctx context.Context) {
}
func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
- r.log.Debug(logs.ProcessEpoch,
+ r.log.Debug(ctx, logs.ProcessEpoch,
zap.Uint64("number", r.curProcEpoch),
)
- traverser, ok := r.generateTraverser(r.address())
+ traverser, ok := r.generateTraverser(ctx, r.address())
if !ok {
return true
}
@@ -67,7 +58,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- r.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
+ r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
return false
}
@@ -75,7 +66,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
for i := range addrs {
select {
case <-ctx.Done():
- r.log.Debug(logs.InterruptPlacementIterationByContext,
+ r.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
zap.Error(ctx.Err()),
)
@@ -91,7 +82,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
client.NodeInfoFromNetmapElement(&info, addrs[i])
if r.processNode(ctx, info) {
- r.log.Debug(logs.GetCompletingTheOperation)
+ r.log.Debug(ctx, logs.GetCompletingTheOperation)
return true
}
}
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 03b7f8bf2..3a50308c2 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -87,51 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
exec.execute(ctx)
- return exec.statusError.err
+ return exec.err
}
-func (exec *request) execute(ctx context.Context) {
- exec.log.Debug(logs.ServingRequest)
+func (r *request) execute(ctx context.Context) {
+ r.log.Debug(ctx, logs.ServingRequest)
// perform local operation
- exec.executeLocal(ctx)
+ r.executeLocal(ctx)
- exec.analyzeStatus(ctx, true)
+ r.analyzeStatus(ctx, true)
}
-func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
+func (r *request) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
- switch exec.status {
+ switch r.status {
case statusOK:
- exec.log.Debug(logs.OperationFinishedSuccessfully)
+ r.log.Debug(ctx, logs.OperationFinishedSuccessfully)
case statusINHUMED:
- exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
+ r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
- exec.log.Debug(logs.GetRequestedObjectIsVirtual)
- exec.assemble(ctx)
+ r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual)
+ r.assemble(ctx)
case statusOutOfRange:
- exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
+ r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds)
case statusEC:
- exec.log.Debug(logs.GetRequestedObjectIsEC)
- if exec.isRaw() && execCnr {
- exec.executeOnContainer(ctx)
- exec.analyzeStatus(ctx, false)
+ r.log.Debug(ctx, logs.GetRequestedObjectIsEC)
+ if r.isRaw() && execCnr {
+ r.executeOnContainer(ctx)
+ r.analyzeStatus(ctx, false)
}
- exec.assembleEC(ctx)
+ r.assembleEC(ctx)
default:
- exec.log.Debug(logs.OperationFinishedWithError,
- zap.Error(exec.err),
+ r.log.Debug(ctx, logs.OperationFinishedWithError,
+ zap.Error(r.err),
)
var errAccessDenied *apistatus.ObjectAccessDenied
- if execCnr && errors.As(exec.err, &errAccessDenied) {
+ if execCnr && errors.As(r.err, &errAccessDenied) {
// Local get can't return access denied error, so this error was returned by
// write to the output stream. So there is no need to try to find object on other nodes.
return
}
if execCnr {
- exec.executeOnContainer(ctx)
- exec.analyzeStatus(ctx, false)
+ r.executeOnContainer(ctx)
+ r.analyzeStatus(ctx, false)
}
}
}
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 6827018dc..3efc72065 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -63,7 +63,7 @@ type testClient struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch() (uint64, error) {
+func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
return uint64(e), nil
}
@@ -79,7 +79,7 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
+func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
opts := make([]placement.Option, 0, 4)
opts = append(opts,
placement.ForContainer(g.c),
@@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e ui
opts = append(opts, placement.ForObject(*obj))
}
- t, err := placement.NewTraverser(opts...)
+ t, err := placement.NewTraverser(context.Background(), opts...)
return t, &containerCore.Container{
Value: g.c,
}, err
}
-func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
index 599a6f176..83ef54744 100644
--- a/pkg/services/object/get/getrangeec_test.go
+++ b/pkg/services/object/get/getrangeec_test.go
@@ -28,14 +28,14 @@ type containerStorage struct {
cnt *container.Container
}
-func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) {
+func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) {
coreCnt := coreContainer.Container{
Value: *cs.cnt,
}
return &coreCnt, nil
}
-func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) {
+func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
return nil, nil
}
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index 1cd5e549c..cfabb082f 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -31,7 +31,7 @@ func (r *request) executeLocal(ctx context.Context) {
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetLocalGetFailed, zap.Error(err))
+ r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index f2639f8e6..78ca5b5e3 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -18,9 +18,9 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
defer span.End()
- r.log.Debug(logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
+ r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
- rs, ok := r.getRemoteStorage(info)
+ rs, ok := r.getRemoteStorage(ctx, info)
if !ok {
return true
}
@@ -35,7 +35,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
switch {
default:
- r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err))
+ r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err))
if r.status != statusEC {
// for raw requests, continue to collect other parts
r.status = statusUndefined
diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go
index 0df67dec9..2c64244cf 100644
--- a/pkg/services/object/get/remote_getter.go
+++ b/pkg/services/object/get/remote_getter.go
@@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob
if err != nil {
return nil, err
}
- epoch, err := g.es.Epoch()
+ epoch, err := g.es.Epoch(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index 1a7a43a35..268080486 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -47,14 +47,14 @@ func (r *request) setLogger(l *logger.Logger) {
req = "GET_RANGE"
}
- r.log = &logger.Logger{Logger: l.With(
+ r.log = l.With(
zap.String("request", req),
zap.Stringer("address", r.address()),
zap.Bool("raw", r.isRaw()),
zap.Bool("local", r.isLocal()),
zap.Bool("with session", r.prm.common.SessionToken() != nil),
zap.Bool("with bearer", r.prm.common.BearerToken() != nil),
- )}
+ )
}
func (r *request) isLocal() bool {
@@ -116,20 +116,20 @@ func (r *request) netmapLookupDepth() uint64 {
return r.prm.common.NetmapLookupDepth()
}
-func (r *request) initEpoch() bool {
+func (r *request) initEpoch(ctx context.Context) bool {
r.curProcEpoch = r.netmapEpoch()
if r.curProcEpoch > 0 {
return true
}
- e, err := r.epochSource.Epoch()
+ e, err := r.epochSource.Epoch(ctx)
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+ r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
return false
case err == nil:
@@ -138,17 +138,17 @@ func (r *request) initEpoch() bool {
}
}
-func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) {
+func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) {
obj := addr.Object()
- t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch)
+ t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch)
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+ r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
return nil, false
case err == nil:
@@ -156,13 +156,13 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo
}
}
-func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) {
+func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) {
rs, err := r.remoteStorageConstructor.Get(info)
if err != nil {
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient)
+ r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient)
return nil, false
}
@@ -185,7 +185,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool {
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err))
+ r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
@@ -206,7 +206,7 @@ func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object)
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err))
+ r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index 3413abeb7..a103f5a7f 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -34,7 +34,7 @@ func New(
result := &Service{
keyStore: ks,
epochSource: es,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
localStorage: &engineLocalStorage{
engine: e,
},
@@ -53,6 +53,6 @@ func New(
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(s *Service) {
- s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))}
+ s.log = l
}
}
diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go
index 9669afdba..664366d1b 100644
--- a/pkg/services/object/get/types.go
+++ b/pkg/services/object/get/types.go
@@ -20,11 +20,11 @@ import (
)
type epochSource interface {
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
}
type traverserGenerator interface {
- GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
type keyStorage interface {
diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go
index e8e82ddd9..308ccd512 100644
--- a/pkg/services/object/get/v2/get_range_hash.go
+++ b/pkg/services/object/get/v2/get_range_hash.go
@@ -22,7 +22,7 @@ import (
// GetRangeHash calls internal service and returns v2 response.
func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- forward, err := s.needToForwardGetRangeHashRequest(req)
+ forward, err := s.needToForwardGetRangeHashRequest(ctx, req)
if err != nil {
return nil, err
}
@@ -48,7 +48,7 @@ type getRangeForwardParams struct {
address oid.Address
}
-func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
+func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
if req.GetMetaHeader().GetTTL() <= 1 {
return getRangeForwardParams{}, nil
}
@@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq
}
result.address = addr
- cont, err := s.contSource.Get(addr.Container())
+ cont, err := s.contSource.Get(ctx, addr.Container())
if err != nil {
return result, fmt.Errorf("(%T) could not get container: %w", s, err)
}
- epoch, err := s.netmapSource.Epoch()
+ epoch, err := s.netmapSource.Epoch(ctx)
if err != nil {
return result, fmt.Errorf("(%T) could not get epoch: %w", s, err)
}
- nm, err := s.netmapSource.GetNetMapByEpoch(epoch)
+ nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return result, fmt.Errorf("(%T) could not get netmap: %w", s, err)
}
@@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq
builder := placement.NewNetworkMapBuilder(nm)
objectID := addr.Object()
- nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy())
+ nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy())
if err != nil {
return result, fmt.Errorf("(%T) could not build object placement: %w", s, err)
}
@@ -125,14 +125,14 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
var addrGr network.AddressGroup
if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil {
- s.log.Warn(logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
var extAddr network.AddressGroup
if len(node.ExternalAddresses()) > 0 {
if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil {
- s.log.Warn(logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
}
@@ -150,12 +150,12 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
if firstErr == nil {
firstErr = err
}
- s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromNode,
+ s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode,
zap.String("node_public_key", hex.EncodeToString(node.PublicKey())),
zap.Stringer("address", params.address),
zap.Error(err))
}
- s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
+ s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
if firstErr != nil {
return nil, firstErr
}
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index 24b2f0099..0ec8912fd 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -60,7 +60,7 @@ func NewService(svc *getsvc.Service,
netmapSource: netmapSource,
announcedKeys: announcedKeys,
contSource: contSource,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
}
for i := range opts {
@@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get V2 service"))}
+ c.log = l
}
}
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
index 98207336c..0d73bcd4d 100644
--- a/pkg/services/object/get/v2/streamer.go
+++ b/pkg/services/object/get/v2/streamer.go
@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
p.SetHeader(objV2.GetHeader())
p.SetSignature(objV2.GetSignature())
- return s.GetObjectStream.Send(newResponse(p))
+ return s.Send(newResponse(p))
}
func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
p := new(objectV2.GetObjectPartChunk)
p.SetChunk(chunk)
- return s.GetObjectStream.Send(newResponse(p))
+ return s.Send(newResponse(p))
}
func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
}
func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
- return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
+ return s.Send(newRangeResponse(chunk))
}
func newRangeResponse(p []byte) *objectV2.GetRangeResponse {
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index bfa7fd619..e699a3779 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -3,6 +3,7 @@ package getsvc
import (
"context"
"crypto/sha256"
+ "errors"
"hash"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -182,9 +183,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
default:
return nil, errUnknownChechsumType(t)
case refs.SHA256:
- p.SetHashGenerator(func() hash.Hash {
- return sha256.New()
- })
+ p.SetHashGenerator(sha256.New)
case refs.TillichZemor:
p.SetHashGenerator(func() hash.Hash {
return tz.New()
@@ -360,19 +359,20 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien
info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
var err error
-
- defer func() {
- stop = err == nil
-
- if stop || firstErr == nil {
- firstErr = err
- }
-
- // would be nice to log otherwise
- }()
-
res, err = f(ctx, addr, c, key)
+ // non-status logic error that could be returned
+ // from the SDK client; should not be considered
+ // as a connection error
+ var siErr *objectSDK.SplitInfoError
+ var eiErr *objectSDK.ECInfoError
+
+ stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
+
+ if stop || firstErr == nil {
+ firstErr = err
+ }
+
return
})
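The rewritten forwarder treats SplitInfoError and ECInfoError as logic-level replies rather than transport failures: they stop iteration over a node's endpoints just like success does, instead of triggering a retry on the next address. A small stand-alone sketch of that classification, using stand-in error types rather than the SDK's:

package main

import (
	"errors"
	"fmt"
)

type SplitInfoError struct{}

func (e *SplitInfoError) Error() string { return "split info" }

type ECInfoError struct{}

func (e *ECInfoError) Error() string { return "ec info" }

// shouldStop reports whether address iteration may stop for this result:
// success and logic-level SDK replies both end the loop.
func shouldStop(err error) bool {
	var siErr *SplitInfoError
	var eiErr *ECInfoError
	return err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
}

func main() {
	fmt.Println(shouldStop(nil))                                       // true
	fmt.Println(shouldStop(fmt.Errorf("wrap: %w", &SplitInfoError{}))) // true: logic error
	fmt.Println(shouldStop(errors.New("connection refused")))          // false: try next address
}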
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index 2c405070d..3e8832640 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -7,9 +7,11 @@ import (
"errors"
"fmt"
"io"
+ "strconv"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -31,6 +33,8 @@ type commonPrm struct {
local bool
xHeaders []string
+
+ netmapEpoch uint64
}
// SetClient sets base client for FrostFS API communication.
@@ -73,6 +77,14 @@ func (x *commonPrm) SetXHeaders(hs []string) {
x.xHeaders = hs
}
+func (x *commonPrm) calculateXHeaders() []string {
+ hs := x.xHeaders
+ if x.netmapEpoch != 0 {
+ hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
+ }
+ return hs
+}
+
type readPrmCommon struct {
commonPrm
}
@@ -80,8 +92,8 @@ type readPrmCommon struct {
// SetNetmapEpoch sets the epoch number to be used to locate the objectSDK.
//
// By default current epoch on the server will be used.
-func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
- // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465
+func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) {
+ x.netmapEpoch = epoch
}
// GetObjectPrm groups parameters of GetObject operation.
@@ -139,7 +151,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.xHeaders
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Key = prm.key
@@ -233,7 +245,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error)
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
- prm.ClientParams.XHeaders = prm.xHeaders
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
if err == nil {
@@ -326,7 +338,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.xHeaders
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Length = prm.ln
@@ -390,7 +402,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
defer span.End()
prmCli := client.PrmObjectPutInit{
- XHeaders: prm.xHeaders,
+ XHeaders: prm.calculateXHeaders(),
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -437,7 +449,7 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro
}
prmCli := client.PrmObjectPutSingle{
- XHeaders: prm.xHeaders,
+ XHeaders: prm.calculateXHeaders(),
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -496,7 +508,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
prm.cliPrm.Local = prm.local
prm.cliPrm.Session = prm.tokenSession
prm.cliPrm.BearerToken = prm.tokenBearer
- prm.cliPrm.XHeaders = prm.xHeaders
+ prm.cliPrm.XHeaders = prm.calculateXHeaders()
prm.cliPrm.Key = prm.key
rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm)
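All read and write paths above now funnel X-headers through calculateXHeaders, which appends the requested netmap epoch as a key/value pair when it is non-zero; this is what replaces the previously stubbed SetNetmapEpoch. A sketch under the assumption that the header key is a plain string constant (the real constant comes from the SDK session package; the literal below is a stand-in):

package main

import (
	"fmt"
	"strconv"
)

// Assumed stand-in for sessionAPI.XHeaderNetmapEpoch.
const xHeaderNetmapEpoch = "__SYSTEM__NETMAP_EPOCH"

type commonPrm struct {
	xHeaders    []string
	netmapEpoch uint64
}

// calculateXHeaders mirrors the patch: the epoch is appended as a
// key/value pair only when it was explicitly set.
func (x *commonPrm) calculateXHeaders() []string {
	hs := x.xHeaders
	if x.netmapEpoch != 0 {
		hs = append(hs, xHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
	}
	return hs
}

func main() {
	p := commonPrm{xHeaders: []string{"k", "v"}, netmapEpoch: 42}
	fmt.Println(p.calculateXHeaders()) // [k v __SYSTEM__NETMAP_EPOCH 42]
}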
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 377350fdd..6a6ee0f0f 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -4,6 +4,7 @@ import (
"context"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
@@ -34,7 +35,7 @@ type (
}
MetricRegister interface {
- AddRequestDuration(string, time.Duration, bool)
+ AddRequestDuration(string, time.Duration, bool, string)
AddPayloadSize(string, int)
}
)
@@ -51,7 +52,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
if m.enabled {
t := time.Now()
defer func() {
- m.metrics.AddRequestDuration("Get", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
}()
err = m.next.Get(req, &getStreamMetric{
ServerStream: stream,
@@ -64,11 +65,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
return
}
-func (m MetricCollector) Put() (PutObjectStream, error) {
+func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Put()
+ stream, err := m.next.Put(ctx)
if err != nil {
return nil, err
}
@@ -79,14 +80,14 @@ func (m MetricCollector) Put() (PutObjectStream, error) {
start: t,
}, nil
}
- return m.next.Put()
+ return m.next.Put(ctx)
}
-func (m MetricCollector) Patch() (PatchObjectStream, error) {
+func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Patch()
+ stream, err := m.next.Patch(ctx)
if err != nil {
return nil, err
}
@@ -97,7 +98,7 @@ func (m MetricCollector) Patch() (PatchObjectStream, error) {
start: t,
}, nil
}
- return m.next.Patch()
+ return m.next.Patch(ctx)
}
func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
@@ -106,7 +107,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl
res, err := m.next.PutSingle(ctx, request)
- m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
if err == nil {
m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload()))
}
@@ -122,7 +123,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest)
res, err := m.next.Head(ctx, request)
- m.metrics.AddRequestDuration("Head", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -135,7 +136,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream)
err := m.next.Search(req, stream)
- m.metrics.AddRequestDuration("Search", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
return err
}
@@ -148,7 +149,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque
res, err := m.next.Delete(ctx, request)
- m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
return m.next.Delete(ctx, request)
@@ -160,7 +161,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR
err := m.next.GetRange(req, stream)
- m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
return err
}
@@ -173,7 +174,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa
res, err := m.next.GetRangeHash(ctx, request)
- m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -209,7 +210,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error
func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil)
+ s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -223,7 +224,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e
func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil)
+ s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
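AddRequestDuration gains a fourth argument, the request's IO tag extracted from the stream or request context, so durations can be broken down by QoS class. A toy register illustrating the extended interface; the node's real register is metrics-backed and is only assumed here:

package main

import (
	"fmt"
	"time"
)

// MetricRegister matches the extended interface from the patch.
type MetricRegister interface {
	AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
}

type printRegister struct{}

func (printRegister) AddRequestDuration(method string, d time.Duration, ok bool, tag string) {
	// A real implementation would record into a histogram labeled by tag.
	fmt.Printf("method=%s dur=%s success=%t io_tag=%q\n", method, d, ok, tag)
}

func main() {
	var m MetricRegister = printRegister{}
	m.AddRequestDuration("Get", 15*time.Millisecond, true, "critical")
}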
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index 953f82b48..5d298bfed 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config,
// Patch calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
- nodeKey, err := s.Config.KeyStorage.GetKey(nil)
+ nodeKey, err := s.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 91b4efdc1..ff13b1d3e 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- target, err := target.New(objectwriter.Params{
+ target, err := target.New(ctx, objectwriter.Params{
Config: s.Config,
Common: commonPrm,
Header: objectSDK.NewFromV2(oV2),
@@ -195,7 +195,12 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
patch.FromV2(req.GetBody())
if !s.nonFirstSend {
- err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes)
+ err := s.patcher.ApplyHeaderPatch(ctx,
+ patcher.ApplyHeaderPatchPrm{
+ NewSplitHeader: patch.NewSplitHeader,
+ NewAttributes: patch.NewAttributes,
+ ReplaceAttributes: patch.ReplaceAttributes,
+ })
if err != nil {
return fmt.Errorf("patch attributes: %w", err)
}
@@ -214,6 +219,9 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
}
func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ if s.patcher == nil {
+ return nil, errors.New("uninitialized patch streamer")
+ }
patcherResp, err := s.patcher.Close(ctx)
if err != nil {
return nil, err
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index 8cf4f0d62..7aeb5857d 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -27,9 +26,7 @@ func NewService(ks *objutil.KeyStorage,
opts ...objectwriter.Option,
) *Service {
c := &objectwriter.Config{
- RemotePool: util.NewPseudoWorkerPool(),
- LocalPool: util.NewPseudoWorkerPool(),
- Logger: &logger.Logger{Logger: zap.L()},
+ Logger: logger.NewLoggerWrapper(zap.L()),
KeyStorage: ks,
ClientConstructor: cc,
MaxSizeSrc: ms,
@@ -59,8 +56,8 @@ func NewService(ks *objutil.KeyStorage,
}
}
-func (p *Service) Put() (*Streamer, error) {
+func (s *Service) Put() (*Streamer, error) {
return &Streamer{
- Config: p.Config,
+ Config: s.Config,
}, nil
}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 3a0b3901f..90f473254 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -21,7 +21,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
@@ -29,6 +28,7 @@ import (
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
@@ -86,7 +86,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest
}
func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.validarePutSingleSize(obj); err != nil {
+ if err := s.validarePutSingleSize(ctx, obj); err != nil {
return object.ContentMeta{}, err
}
@@ -97,12 +97,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object)
return s.validatePutSingleObject(ctx, obj)
}
-func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error {
+func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error {
if uint64(len(obj.Payload())) != obj.PayloadSize() {
return target.ErrWrongPayloadSize
}
- maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize()
+ maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
if obj.PayloadSize() > maxAllowedSize {
return target.ErrExceedingMaxSize
}
@@ -153,7 +153,7 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob
func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
localOnly := req.GetMetaHeader().GetTTL() <= 1
- placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly)
+ placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly)
if err != nil {
return err
}
@@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
}
func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- iter := s.Config.NewNodeIterator(placement.placementOptions)
+ iter := s.NewNodeIterator(placement.placementOptions)
iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.Config.KeyStorage,
+ keyStorage: s.KeyStorage,
signer: &sync.Once{},
}
@@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
if err != nil {
return err
}
- key, err := s.Config.KeyStorage.GetKey(nil)
+ key, err := s.KeyStorage.GetKey(nil)
if err != nil {
return err
}
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.Config.KeyStorage,
+ keyStorage: s.KeyStorage,
signer: &sync.Once{},
}
@@ -218,14 +218,14 @@ type putSinglePlacement struct {
resetSuccessAfterOnBroadcast bool
}
-func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
+func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
var result putSinglePlacement
cnrID, ok := obj.ContainerID()
if !ok {
return result, errors.New("missing container ID")
}
- cnrInfo, err := s.Config.ContainerSource.Get(cnrID)
+ cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
if err != nil {
return result, fmt.Errorf("could not get container by ID: %w", err)
}
@@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
}
result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
- latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource)
+ latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
if err != nil {
return result, fmt.Errorf("could not get latest network map: %w", err)
}
builder := placement.NewNetworkMapBuilder(latestNetmap)
if localOnly {
result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
- builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
+ builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
}
result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
return result, nil
@@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
- c, err := s.Config.ClientConstructor.Get(info)
+ c, err := s.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
@@ -283,7 +283,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
localTarget := &objectwriter.LocalTarget{
- Storage: s.Config.LocalStore,
+ Storage: s.LocalStore,
Container: container,
}
return localTarget.WriteObject(ctx, obj, meta)
@@ -317,12 +317,11 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
if err != nil {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
- s.Config.Logger.Warn(logs.PutSingleRedirectFailure,
+ s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
zap.Error(err),
zap.Stringer("address", addr),
zap.Stringer("object_id", objID),
zap.Stringer("container_id", cnrID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
@@ -351,8 +350,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
err = signature.VerifyServiceMessage(resp)
if err != nil {
err = fmt.Errorf("response verification failed: %w", err)
+ return
}
+ st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus())
+ err = apistatus.ErrFromStatus(st)
+
return
})
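Two separate fixes land here: the added return prevents a later assignment from overwriting the verification error, and the new status conversion surfaces API-level failures carried in the response meta header as ordinary Go errors. A toy reproduction of the overwrite hazard that the return closes (statusFromResponse stands in for the apistatus conversion):

package main

import (
	"errors"
	"fmt"
)

func verify() error { return errors.New("bad signature") }

// Stand-in for apistatus.ErrFromStatus(apistatus.FromStatusV2(...)).
func statusFromResponse() error { return nil }

func handle() (err error) {
	if err = verify(); err != nil {
		err = fmt.Errorf("response verification failed: %w", err)
		return // without this return, the assignment below would drop err
	}
	err = statusFromResponse()
	return
}

func main() { fmt.Println(handle()) }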
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index f71309d31..19768b7fa 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -36,7 +36,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
}
var err error
- p.target, err = target.New(prmTarget)
+ p.target, err = target.New(ctx, prmTarget)
if err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 36b514fbc..f0c648187 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -56,10 +56,10 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxSizeSrc.MaxObjectSize()
+ maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx)
s.sizes = &sizes{
- payloadSz: uint64(v.GetHeader().GetPayloadLength()),
+ payloadSz: v.GetHeader().GetPayloadLength(),
}
// check payload size limit overflow
diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go
new file mode 100644
index 000000000..01eb1ea8d
--- /dev/null
+++ b/pkg/services/object/qos.go
@@ -0,0 +1,145 @@
+package object
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+)
+
+var _ ServiceServer = (*qosObjectService)(nil)
+
+type AdjustIOTag interface {
+ AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
+type qosObjectService struct {
+ next ServiceServer
+ adj AdjustIOTag
+}
+
+func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer {
+ return &qosObjectService{
+ next: next,
+ adj: adjIOTag,
+ }
+}
+
+func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Delete(ctx, req)
+}
+
+func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Get(req, &qosReadStream[*object.GetResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.GetRangeHash(ctx, req)
+}
+
+func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Head(ctx, req)
+}
+
+func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ s, err := q.next.Patch(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{
+ s: s,
+ adj: q.adj,
+ }, nil
+}
+
+func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) {
+ s, err := q.next.Put(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &qosWriteStream[*object.PutRequest, *object.PutResponse]{
+ s: s,
+ adj: q.adj,
+ }, nil
+}
+
+func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.PutSingle(ctx, req)
+}
+
+func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+type qosSend[T any] interface {
+ Send(T) error
+}
+
+type qosReadStream[T any] struct {
+ sender qosSend[T]
+ ctxF func() context.Context
+}
+
+func (g *qosReadStream[T]) Context() context.Context {
+ return g.ctxF()
+}
+
+func (g *qosReadStream[T]) Send(resp T) error {
+ return g.sender.Send(resp)
+}
+
+type qosVerificationHeader interface {
+ GetVerificationHeader() *session.RequestVerificationHeader
+}
+
+type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
+ Send(context.Context, TReq) error
+ CloseAndRecv(context.Context) (TResp, error)
+}
+
+type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
+ s qosSendRecv[TReq, TResp]
+ adj AdjustIOTag
+
+ ioTag string
+ ioTagDefined bool
+}
+
+func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
+ if q.ioTagDefined {
+ ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+ }
+ return q.s.CloseAndRecv(ctx)
+}
+
+func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
+ if !q.ioTagDefined {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
+ }
+ assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
+ ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+ return q.s.Send(ctx, req)
+}
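For write streams the IO tag cannot be fixed when the stream is opened, because the signer is only known from the first request; qosWriteStream therefore resolves the tag once on the first Send and pins it for the rest of the stream, including CloseAndRecv. A stand-alone sketch of that rule, with stand-ins replacing the SDK request type and the frostfs-qos tagging package:

package main

import (
	"context"
	"fmt"
)

type ioTagKey struct{}

func contextWithIOTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, ioTagKey{}, tag)
}

func ioTagFromContext(ctx context.Context) (string, bool) {
	tag, ok := ctx.Value(ioTagKey{}).(string)
	return tag, ok
}

type writeStream struct {
	ioTag        string
	ioTagDefined bool
}

// Send resolves the tag from the first request's signer key and reuses it
// for every later call on the same stream.
func (w *writeStream) Send(ctx context.Context, signerKey []byte) context.Context {
	if !w.ioTagDefined {
		// The real service consults AdjustIncomingTag(ctx, signerKey) here.
		ctx = contextWithIOTag(ctx, "internal")
		w.ioTag, w.ioTagDefined = ioTagFromContext(ctx)
	}
	return contextWithIOTag(ctx, w.ioTag)
}

func main() {
	w := &writeStream{}
	ctx := w.Send(context.Background(), []byte{0x01})
	tag, _ := ioTagFromContext(ctx)
	fmt.Println(tag) // "internal", pinned for the rest of the stream
}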
diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go
deleted file mode 100644
index 95d4c9d93..000000000
--- a/pkg/services/object/request_context.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package object
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-type RequestContextKeyT struct{}
-
-var RequestContextKey = RequestContextKeyT{}
-
-// RequestContext is a context passed between middleware handlers.
-type RequestContext struct {
- Namespace string
-
- SenderKey []byte
-
- ContainerOwner user.ID
-
- Role acl.Role
-
- SoftAPECheck bool
-
- BearerToken *bearer.Token
-}
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index 3787b4168..80c971e8f 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -80,8 +80,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo
return r, nil
}
-func (s *ResponseService) Put() (PutObjectStream, error) {
- stream, err := s.svc.Put()
+func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
+ stream, err := s.svc.Put(ctx)
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -109,8 +109,8 @@ func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchR
return r, nil
}
-func (s *ResponseService) Patch() (PatchObjectStream, error) {
- stream, err := s.svc.Patch()
+func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ stream, err := s.svc.Patch(ctx)
if err != nil {
return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
}
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index 39259b0ca..60d469b11 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -15,12 +15,12 @@ import (
func (exec *execCtx) executeOnContainer(ctx context.Context) error {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug(logs.TryingToExecuteInContainer,
+ exec.log.Debug(ctx, logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- if err := exec.initEpoch(); err != nil {
+ if err := exec.initEpoch(ctx); err != nil {
return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
}
@@ -44,11 +44,11 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
- exec.log.Debug(logs.ProcessEpoch,
+ exec.log.Debug(ctx, logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
- traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch)
+ traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch)
if err != nil {
return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
}
@@ -59,7 +59,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
+ exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
break
}
@@ -72,8 +72,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug(logs.InterruptPlacementIterationByContext,
- zap.String("error", ctx.Err().Error()))
+ exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
+ zap.Error(ctx.Err()))
return
default:
}
@@ -82,18 +82,18 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
- exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err))
return
}
ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
- exec.log.Debug(logs.SearchRemoteOperationFailed,
- zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
+ zap.Error(err))
return
}
@@ -102,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
err = exec.writeIDList(ids)
mtx.Unlock()
if err != nil {
- exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err))
return
}
}(i)
@@ -114,9 +114,9 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
return nil
}
-func (exec *execCtx) getContainer() (containerSDK.Container, error) {
+func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) {
cnrID := exec.containerID()
- cnr, err := exec.svc.containerSource.Get(cnrID)
+ cnr, err := exec.svc.containerSource.Get(ctx, cnrID)
if err != nil {
return containerSDK.Container{}, err
}
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index 4a2c04ecd..ced51ecce 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -1,6 +1,8 @@
package searchsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -19,13 +21,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = &logger.Logger{Logger: l.With(
+ exec.log = l.With(
zap.String("request", "SEARCH"),
zap.Stringer("container", exec.containerID()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )}
+ )
}
func (exec *execCtx) isLocal() bool {
@@ -48,13 +50,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
return exec.prm.common.NetmapLookupDepth()
}
-func (exec *execCtx) initEpoch() error {
+func (exec *execCtx) initEpoch(ctx context.Context) error {
exec.curProcEpoch = exec.netmapEpoch()
if exec.curProcEpoch > 0 {
return nil
}
- e, err := exec.svc.currentEpochReceiver.Epoch()
+ e, err := exec.svc.currentEpochReceiver.Epoch(ctx)
if err != nil {
return err
}
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index cfaed13b8..ec65ab06a 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -11,7 +11,7 @@ import (
func (exec *execCtx) executeLocal(ctx context.Context) error {
ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
- exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err))
return err
}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 4a5c414d5..76c091f85 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -20,26 +20,26 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(logs.ServingRequest)
+ exec.log.Debug(ctx, logs.ServingRequest)
err := exec.executeLocal(ctx)
- exec.logResult(err)
+ exec.logResult(ctx, err)
if exec.isLocal() {
- exec.log.Debug(logs.SearchReturnResultDirectly)
+ exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
return err
}
err = exec.executeOnContainer(ctx)
- exec.logResult(err)
+ exec.logResult(ctx, err)
return err
}
-func (exec *execCtx) logResult(err error) {
+func (exec *execCtx) logResult(ctx context.Context, err error) {
switch {
default:
- exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
case err == nil:
- exec.log.Debug(logs.OperationFinishedSuccessfully)
+ exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
}
}
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 0a40025e1..918ad421f 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -6,6 +6,7 @@ import (
"crypto/sha256"
"errors"
"fmt"
+ "slices"
"strconv"
"testing"
@@ -58,7 +59,7 @@ type simpleIDWriter struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch() (uint64, error) {
+func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
return uint64(e), nil
}
@@ -81,8 +82,8 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
- t, err := placement.NewTraverser(
+func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
+ t, err := placement.NewTraverser(ctx,
placement.ForContainer(g.c),
placement.UseBuilder(g.b[epoch]),
placement.WithoutSuccessTracking(),
@@ -90,7 +91,7 @@ func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch ui
return t, &containerCore.Container{Value: g.c}, err
}
-func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -103,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.
return nil, errors.New("vectors for address not found")
}
- res := make([][]netmap.NodeInfo, len(vs))
- copy(res, vs)
+ res := slices.Clone(vs)
return res, nil
}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index 7700f78d8..56fe56468 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -46,11 +46,11 @@ type cfg struct {
}
traverserGenerator interface {
- GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
currentEpochReceiver interface {
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
}
keyStore *util.KeyStorage
@@ -69,7 +69,7 @@ func New(e *engine.StorageEngine,
opts ...Option,
) *Service {
c := &cfg{
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
clientConstructor: &clientConstructorWrapper{
constructor: cc,
},
@@ -94,6 +94,6 @@ func New(e *engine.StorageEngine,
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))}
+ c.log = l
}
}
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 910384a0b..0be5345b9 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -2,6 +2,7 @@ package searchsvc
import (
"context"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -53,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
}
// exclude processed address
- list = append(list[:i], list[i+1:]...)
+ list = slices.Delete(list, i, i+1)
i--
}
@@ -113,7 +114,7 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
}
func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
- cnr, err := exec.getContainer()
+ cnr, err := exec.getContainer(ctx)
if err != nil {
return nil, err
}
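The append(list[:i], list[i+1:]...) idiom is replaced by slices.Delete, which performs the same in-place shift; the i-- afterwards is still required because the element that followed the deleted one now sits at index i. The loop in isolation:

package main

import (
	"fmt"
	"slices"
)

func dedupe(list []int, seen map[int]bool) []int {
	for i := 0; i < len(list); i++ {
		if !seen[list[i]] {
			seen[list[i]] = true
			continue
		}
		// exclude processed element, then revisit index i
		list = slices.Delete(list, i, i+1)
		i--
	}
	return list
}

func main() {
	fmt.Println(dedupe([]int{1, 2, 1, 3, 2}, map[int]bool{})) // [1 2 3]
}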
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index c570e9d8e..e65293977 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -41,8 +41,8 @@ type PatchObjectStream interface {
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
- Put() (PutObjectStream, error)
- Patch() (PatchObjectStream, error)
+ Put(context.Context) (PutObjectStream, error)
+ Patch(context.Context) (PatchObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 2c5e794e9..fd8e926dd 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -96,15 +96,16 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- return nil, fmt.Errorf("could not close stream and receive response: %w", err)
+ err = fmt.Errorf("could not close stream and receive response: %w", err)
+ resp = new(object.PutResponse)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Put() (PutObjectStream, error) {
- stream, err := s.svc.Put()
+func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
+ stream, err := s.svc.Put(ctx)
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -132,15 +133,16 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- return nil, fmt.Errorf("could not close stream and receive response: %w", err)
+ err = fmt.Errorf("could not close stream and receive response: %w", err)
+ resp = new(object.PatchResponse)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Patch() (PatchObjectStream, error) {
- stream, err := s.svc.Patch()
+func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ stream, err := s.svc.Patch(ctx)
if err != nil {
return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
}
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index 1438a0ea2..b446d3605 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -87,12 +87,12 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
})
}
-func (c TransportSplitter) Put() (PutObjectStream, error) {
- return c.next.Put()
+func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
+ return c.next.Put(ctx)
}
-func (c TransportSplitter) Patch() (PatchObjectStream, error) {
- return c.next.Patch()
+func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) {
+ return c.next.Patch(ctx)
}
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
@@ -162,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
var newResp *object.SearchResponse
- for ln := uint64(len(ids)); ; {
+ for {
if newResp == nil {
newResp = new(object.SearchResponse)
newResp.SetBody(body)
}
- cut := min(s.addrAmount, ln)
+ cut := min(s.addrAmount, uint64(len(ids)))
body.SetIDList(ids[:cut])
newResp.SetMetaHeader(resp.GetMetaHeader())
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index 92beedaa7..b10826226 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,6 +1,8 @@
package util
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -8,18 +10,10 @@ import (
)
// LogServiceError writes error message of object service to provided logger.
-func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
- l.Error(logs.UtilObjectServiceError,
+func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) {
+ l.Error(ctx, logs.UtilObjectServiceError,
zap.String("node", network.StringifyGroup(node)),
zap.String("request", req),
- zap.String("error", err.Error()),
- )
-}
-
-// LogWorkerPoolError writes debug error message of object worker pool to provided logger.
-func LogWorkerPoolError(l *logger.Logger, req string, err error) {
- l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
- zap.String("request", req),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go
index 1bd39f9ea..f74b0aab9 100644
--- a/pkg/services/object/util/placement.go
+++ b/pkg/services/object/util/placement.go
@@ -1,7 +1,9 @@
package util
import (
+ "context"
"fmt"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -43,8 +45,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu
}
}
-func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(cnr, obj, policy)
+func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -76,8 +78,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac
}
}
-func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(cnr, obj, policy)
+func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -92,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapS
}
if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
- vs[i] = append(vs[i][:j], vs[i][j+1:]...)
+ vs[i] = slices.Delete(vs[i], j, j+1)
j--
}
}
@@ -122,15 +124,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav
// GenerateTraverser generates placement Traverser for provided object address
// using epoch-th network map.
-func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
+func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
// get network map by epoch
- nm, err := g.netMapSrc.GetNetMapByEpoch(epoch)
+ nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
}
// get the related container
- cnr, err := g.cnrSrc.Get(idCnr)
+ cnr, err := g.cnrSrc.Get(ctx, idCnr)
if err != nil {
return nil, nil, fmt.Errorf("could not get container: %w", err)
}
@@ -160,7 +162,7 @@ func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoc
)
}
- t, err := placement.NewTraverser(traverseOpts...)
+ t, err := placement.NewTraverser(ctx, traverseOpts...)
if err != nil {
return nil, nil, err
}
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
index a890d5357..7242970b5 100644
--- a/pkg/services/object_manager/placement/cache_test.go
+++ b/pkg/services/object_manager/placement/cache_test.go
@@ -85,7 +85,10 @@ func TestContainerNodesCache(t *testing.T) {
})
t.Run("the error is propagated", func(t *testing.T) {
var pp netmapSDK.PlacementPolicy
- require.NoError(t, pp.DecodeString("REP 1 SELECT 1 FROM X FILTER ATTR EQ 42 AS X"))
+ r := netmapSDK.ReplicaDescriptor{}
+ r.SetNumberOfObjects(1)
+ r.SetSelectorName("Missing")
+ pp.AddReplicas(r)
c := placement.NewContainerNodesCache(size)
_, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go
index 45e6df339..0f24a9d96 100644
--- a/pkg/services/object_manager/placement/metrics.go
+++ b/pkg/services/object_manager/placement/metrics.go
@@ -2,24 +2,90 @@ package placement
import (
"errors"
+ "fmt"
+ "maps"
+ "math"
"strings"
+ "sync"
+ "sync/atomic"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
const (
attrPrefix = "$attribute:"
+
+ geoDistance = "$geoDistance"
)
type Metric interface {
CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int
}
-func ParseMetric(raw string) (Metric, error) {
- if attr, found := strings.CutPrefix(raw, attrPrefix); found {
- return NewAttributeMetric(attr), nil
+type metricsParser struct {
+ locodeDBPath string
+ locodes map[string]locodedb.Point
+}
+
+type MetricParser interface {
+ ParseMetrics([]string) ([]Metric, error)
+}
+
+func NewMetricsParser(locodeDBPath string) (MetricParser, error) {
+ return &metricsParser{
+ locodeDBPath: locodeDBPath,
+ }, nil
+}
+
+func (p *metricsParser) initLocodes() error {
+ if len(p.locodes) != 0 {
+ return nil
}
- return nil, errors.New("unsupported priority metric")
+ if len(p.locodeDBPath) > 0 {
+ p.locodes = make(map[string]locodedb.Point)
+ locodeDB := locodebolt.New(locodebolt.Prm{
+ Path: p.locodeDBPath,
+ },
+ locodebolt.ReadOnly(),
+ )
+ err := locodeDB.Open()
+ if err != nil {
+ return err
+ }
+ defer locodeDB.Close()
+ err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) {
+ p.locodes[k] = v
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ return errors.New("set path to locode database")
+}
+
+func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) {
+ var metrics []Metric
+ for _, raw := range priority {
+ if attr, found := strings.CutPrefix(raw, attrPrefix); found {
+ metrics = append(metrics, NewAttributeMetric(attr))
+ } else if raw == geoDistance {
+ err := p.initLocodes()
+ if err != nil {
+ return nil, err
+ }
+ if len(p.locodes) == 0 {
+ return nil, fmt.Errorf("provide locodes database for metric %s", raw)
+ }
+ m := NewGeoDistanceMetric(p.locodes)
+ metrics = append(metrics, m)
+ } else {
+ return nil, fmt.Errorf("unsupported priority metric %s", raw)
+ }
+ }
+ return metrics, nil
}
// attributeMetric describes priority metric based on attribute.
@@ -41,3 +107,79 @@ func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.Node
func NewAttributeMetric(attr string) Metric {
return &attributeMetric{attribute: attr}
}
+
+// geoDistanceMetric describes a priority metric based on the geographic distance between nodes.
+type geoDistanceMetric struct {
+ locodes map[string]locodedb.Point
+ distance *atomic.Pointer[map[string]int]
+ mtx sync.Mutex
+}
+
+func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
+ d := atomic.Pointer[map[string]int]{}
+ m := make(map[string]int)
+ d.Store(&m)
+ gm := &geoDistanceMetric{
+ locodes: locodes,
+ distance: &d,
+ }
+ return gm
+}
+
+// CalculateValue returns the distance in kilometers between the current node
+// and the provided one, if coordinates for both nodes are found.
+// Otherwise it returns math.MaxInt.
+func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
+ fl := from.LOCODE()
+ tl := to.LOCODE()
+ if fl == tl {
+ return 0
+ }
+ m := gm.distance.Load()
+ if v, ok := (*m)[fl+tl]; ok {
+ return v
+ }
+ return gm.calculateDistance(fl, tl)
+}
+
+func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
+ gm.mtx.Lock()
+ defer gm.mtx.Unlock()
+ od := gm.distance.Load()
+ if v, ok := (*od)[from+to]; ok {
+ return v
+ }
+ nd := maps.Clone(*od)
+ var dist int
+ pointFrom, okFrom := gm.locodes[from]
+ pointTo, okTo := gm.locodes[to]
+ if okFrom && okTo {
+ dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
+ } else {
+ dist = math.MaxInt
+ }
+ nd[from+to] = dist
+ gm.distance.Store(&nd)
+
+ return dist
+}
+
+// distance returns the distance in kilometers between two points.
+// Parameters are the latitude and longitude of points 1 and 2 in decimal degrees.
+// The original implementation can be found at https://www.geodatasource.com/developers/go.
+func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
+ radLat1 := math.Pi * lt1 / 180
+ radLat2 := math.Pi * lt2 / 180
+ radTheta := math.Pi * (ln1 - ln2) / 180
+
+ dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
+
+ if dist > 1 {
+ dist = 1
+ }
+
+ dist = math.Acos(dist)
+ dist = dist * 180 / math.Pi
+ dist = dist * 60 * 1.1515 * 1.609344
+
+ return dist
+}
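// ---- Reviewer example (not part of the diff) ----
// A minimal sanity check for the law-of-cosines conversion added above.
// For Moscow (55.7558 N, 37.6173 E) and Saint Petersburg (59.9343 N,
// 30.3351 E) it yields roughly 633 km, matching the commonly cited
// great-circle distance, so $geoDistance should rank RU LED ahead of
// RU MOW for a node in FI HEL. Standard library only; the coordinates
// are illustrative.
package main

import (
	"fmt"
	"math"
)

// distance mirrors the implementation added in metrics.go.
func distance(lt1, ln1, lt2, ln2 float64) float64 {
	radLat1 := math.Pi * lt1 / 180
	radLat2 := math.Pi * lt2 / 180
	radTheta := math.Pi * (ln1 - ln2) / 180

	d := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
	if d > 1 {
		d = 1
	}
	// arc degrees -> arc minutes (nautical miles) -> statute miles -> kilometers
	return math.Acos(d) * 180 / math.Pi * 60 * 1.1515 * 1.609344
}

func main() {
	fmt.Printf("MOW->LED: %.0f km\n", distance(55.7558, 37.6173, 59.9343, 30.3351)) // ~633
}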
diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go
index 1782e27ea..b3f8d9c03 100644
--- a/pkg/services/object_manager/placement/netmap.go
+++ b/pkg/services/object_manager/placement/netmap.go
@@ -1,6 +1,7 @@
package placement
import (
+ "context"
"crypto/sha256"
"fmt"
@@ -35,12 +36,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
}
}
-func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) {
+func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) {
return s.nm, nil
}
-func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
+func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc)
if err != nil {
return nil, fmt.Errorf("could not get network map: %w", err)
}
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 6440f187d..a3f9af959 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -1,6 +1,7 @@
package placement
import (
+ "context"
"errors"
"fmt"
"slices"
@@ -21,7 +22,7 @@ type Builder interface {
//
// Must return all container nodes if object identifier
// is nil.
- BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
+ BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
}
type NodeState interface {
@@ -78,7 +79,7 @@ func defaultCfg() *cfg {
}
// NewTraverser creates, initializes with options and returns Traverser instance.
-func NewTraverser(opts ...Option) (*Traverser, error) {
+func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
cfg := defaultCfg()
for i := range opts {
@@ -98,7 +99,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy)
}
- ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy)
+ ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy)
if err != nil {
return nil, fmt.Errorf("could not build placement: %w", err)
}
@@ -114,15 +115,13 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
var unsortedVector []netmap.NodeInfo
var regularVector []netmap.NodeInfo
for i := range rem {
- unsortedVector = append(unsortedVector, ns[i][:rem[i]]...)
- regularVector = append(regularVector, ns[i][rem[i]:]...)
+ pivot := min(len(ns[i]), rem[i])
+ unsortedVector = append(unsortedVector, ns[i][:pivot]...)
+ regularVector = append(regularVector, ns[i][pivot:]...)
}
rem = []int{-1, -1}
- sortedVector, err := sortVector(cfg, unsortedVector)
- if err != nil {
- return nil, err
- }
+ sortedVector := sortVector(cfg, unsortedVector)
ns = [][]netmap.NodeInfo{sortedVector, regularVector}
} else if cfg.flatSuccess != nil {
ns = flatNodes(ns)
@@ -187,7 +186,7 @@ type nodeMetrics struct {
metrics []int
}
-func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, error) {
+func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo {
nm := make([]nodeMetrics, len(unsortedVector))
node := cfg.nodeState.LocalNodeInfo()
@@ -201,14 +200,14 @@ func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo,
metrics: m,
}
}
- slices.SortFunc(nm, func(a, b nodeMetrics) int {
+ slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
return slices.Compare(a.metrics, b.metrics)
})
sortedVector := make([]netmap.NodeInfo, len(unsortedVector))
for i := range unsortedVector {
sortedVector[i] = unsortedVector[nm[i].index]
}
- return sortedVector, nil
+ return sortedVector
}
// Node is a descriptor of storage node with information required for intra-container communication.
@@ -289,8 +288,8 @@ func (t *Traverser) Next() []Node {
func (t *Traverser) skipEmptyVectors() {
for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
- t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
- t.rem = append(t.rem[:i], t.rem[i+1:]...)
+ t.vectors = slices.Delete(t.vectors, i, i+1)
+ t.rem = slices.Delete(t.rem, i, i+1)
i--
} else {
break
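// ---- Reviewer example (not part of the diff) ----
// Two behavior changes above are worth illustrating. First, the pivot is
// now min(len(ns[i]), rem[i]), so a REP count larger than the selected
// vector no longer slices out of range. Second, SortStableFunc replaces
// SortFunc: nodes whose metric vectors compare equal keep their previous
// (placement-derived) relative order, so traversal is deterministic.
// Self-contained sketch with strings standing in for netmap.NodeInfo:
package main

import (
	"fmt"
	"slices"
)

type nodeMetrics struct {
	name    string
	metrics []int
}

func main() {
	nm := []nodeMetrics{
		{"n0", []int{1}}, {"n1", []int{0}}, {"n2", []int{1}}, {"n3", []int{0}},
	}
	// Stable sort by metric vector: n1 stays ahead of n3, n0 ahead of n2.
	slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
		return slices.Compare(a.metrics, b.metrics)
	})
	for _, n := range nm {
		fmt.Println(n.name, n.metrics) // n1, n3, n0, n2
	}
}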
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index 38f62aa07..d1370f21e 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -1,6 +1,8 @@
package placement
import (
+ "context"
+ "slices"
"strconv"
"testing"
@@ -17,7 +19,7 @@ type testBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return b.vectors, nil
}
@@ -33,8 +35,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
vc := make([][]netmap.NodeInfo, 0, len(v))
for i := range v {
- ns := make([]netmap.NodeInfo, len(v[i]))
- copy(ns, v[i])
+ ns := slices.Clone(v[i])
vc = append(vc, ns)
}
@@ -102,7 +103,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithoutSuccessTracking(),
@@ -131,7 +132,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -160,7 +161,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
)
@@ -201,7 +202,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodes, cnr := testPlacement(selectors, replicas)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local)
@@ -276,7 +277,7 @@ func TestTraverserRemValues(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithCopyNumbers(testCase.copyNumbers),
@@ -322,7 +323,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
m := []Metric{NewAttributeMetric("ClusterName")}
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -356,6 +357,52 @@ func TestTraverserPriorityMetrics(t *testing.T) {
require.Nil(t, next)
})
+ t.Run("one rep one metric fewer nodes", func(t *testing.T) {
+ selectors := []int{2}
+ replicas := []int{3}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "B")
+
+ sdkNode := testNode(5)
+ sdkNode.SetAttribute("ClusterName", "B")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{NewAttributeMetric("ClusterName")}
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `ClusterName` the order will be:
+ // [ {Node_0 A}, {Node_1 B} ]
+ // With priority metric `ClusterName` and current node in cluster B
+ // the order should be:
+ // [ {Node_1 B}, {Node_0 A} ]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
t.Run("two reps two metrics", func(t *testing.T) {
selectors := []int{3, 3}
replicas := []int{2, 2}
@@ -399,7 +446,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
NewAttributeMetric("UN-LOCODE"),
}
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -437,7 +484,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
nodesCopy = copyVectors(nodes)
- tr, err = NewTraverser(
+ tr, err = NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -470,7 +517,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
nodesCopy = copyVectors(nodes)
- tr, err = NewTraverser(
+ tr, err = NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -521,7 +568,7 @@ func TestTraverserPriorityMetrics(t *testing.T) {
m := []Metric{NewAttributeMetric("ClusterName")}
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -554,4 +601,53 @@ func TestTraverserPriorityMetrics(t *testing.T) {
next = tr.Next()
require.Nil(t, next)
})
+
+ t.Run("one rep one geo metric", func(t *testing.T) {
+ t.Skip("requires a real UN/LOCODE database; update the path below to run")
+ selectors := []int{2}
+ replicas := []int{2}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
+
+ sdkNode := testNode(2)
+ sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
+
+ nodesCopy := copyVectors(nodes)
+
+ parser, err := NewMetricsParser("/path/to/locode_db")
+ require.NoError(t, err)
+ m, err := parser.ParseMetrics([]string{geoDistance})
+ require.NoError(t, err)
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `$geoDistance` the order will be:
+ // [ {Node_0 RU MOW}, {Node_1 RU LED} ]
+ // With priority metric `$geoDistance` the order should be:
+ // [ {Node_1 RU LED}, {Node_0 RU MOW} ]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
}
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index 7476dbd48..e5f001d5a 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -57,14 +57,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
ts, err := g.tsSource.Tombstone(ctx, a, epoch)
if err != nil {
- log.Warn(
+ log.Warn(ctx,
logs.TombstoneCouldNotGetTheTombstoneTheSource,
zap.Error(err),
)
- } else {
- if ts != nil {
- return g.handleTS(addrStr, ts, epoch)
- }
+ } else if ts != nil {
+ return g.handleTS(ctx, addrStr, ts, epoch)
}
// requested tombstone not
@@ -72,12 +70,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
return false
}
-func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
+func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
if atr.Key() == objectV2.SysAttributeExpEpoch {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
- g.log.Warn(
+ g.log.Warn(ctx,
logs.TombstoneExpirationParseFailure,
zap.Error(err),
)
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 9d33e8179..2147a32fe 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -3,6 +3,7 @@ package tombstone
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
@@ -23,7 +24,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- log: &logger.Logger{Logger: zap.NewNop()},
+ log: logger.NewLoggerWrapper(zap.NewNop()),
cacheSize: defaultLRUCacheSize,
}
}
@@ -49,9 +50,7 @@ func NewChecker(oo ...Option) *ExpirationChecker {
panicOnNil(cfg.tsSource, "Tombstone source")
cache, err := lru.New[string, uint64](cfg.cacheSize)
- if err != nil {
- panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
- }
+ assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize))
return &ExpirationChecker{
cache: cache,
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 1ff07b05a..975941847 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -38,9 +39,7 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
// Panics if any of the provided options does not allow
// constructing a valid tombstone local Source.
func NewSource(p TombstoneSourcePrm) Source {
- if p.s == nil {
- panic("Tombstone source: nil object service")
- }
+ assert.False(p.s == nil, "Tombstone source: nil object service")
return Source(p)
}
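// ---- Reviewer sketch (not part of the diff) ----
// The explicit panics above are replaced with helpers from the new
// internal/assert package. Its actual contents are not shown in this diff;
// judging from the call sites, assert.NoError(err, msg) and
// assert.False(cond, msg), the helpers are presumably shaped like this:
package assert

import "strings"

// False panics with the joined details when cond is true.
func False(cond bool, details ...string) {
	if cond {
		panic(strings.Join(details, " "))
	}
}

// NoError panics with the details and the error text when err is non-nil.
func NoError(err error, details ...string) {
	if err != nil {
		panic(strings.Join(details, " ") + ": " + err.Error())
	}
}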
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index dbc9ea53c..dcaaec0b4 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -28,10 +28,10 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
))
defer span.End()
- cnr, err := p.cnrSrc.Get(objInfo.Address.Container())
+ cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container())
if err != nil {
if client.IsErrContainerNotFound(err) {
- existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container())
+ existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container())
if errWasRemoved != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved)
} else if existed {
@@ -56,7 +56,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
idObj := objInfo.Address.Object()
idCnr := objInfo.Address.Container()
- nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
+ nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -86,7 +86,7 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc
}
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -110,6 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
// Number of copies that are stored on maintenance nodes.
var uncheckedCopies int
+ var candidates []netmap.NodeInfo
for i := 0; shortage > 0 && i < len(nodes); i++ {
select {
case <-ctx.Done():
@@ -117,71 +118,68 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
default:
}
- if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
- requirements.needLocalCopy = true
-
- shortage--
- } else if nodes[i].Status().IsMaintenance() {
- shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
- } else {
- if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
- if status == nodeHoldsObject {
- // node already contains replica, no need to replicate
- nodes = append(nodes[:i], nodes[i+1:]...)
- i--
- shortage--
- }
-
+ var err error
+ st := checkedNodes.processStatus(nodes[i])
+ if !st.Processed() {
+ st, err = p.checkStatus(ctx, addr, nodes[i])
+ checkedNodes.set(nodes[i], st)
+ if st == nodeDoesNotHoldObject {
+ // 1. This is the first time the node is encountered (`!st.Processed()`).
+ // 2. The node does not hold the object (`st == nodeDoesNotHoldObject`).
+ // So we need to try to put the object on it.
+ candidates = append(candidates, nodes[i])
continue
}
-
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
-
- _, err := p.remoteHeader(callCtx, nodes[i], addr, false)
-
- cancel()
-
- if err == nil {
- shortage--
- checkedNodes.submitReplicaHolder(nodes[i])
- } else {
- if client.IsErrObjectNotFound(err) {
- checkedNodes.submitReplicaCandidate(nodes[i])
- continue
- } else if client.IsErrNodeUnderMaintenance(err) {
- shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
- } else {
- p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
- zap.Stringer("object", addr),
- zap.String("error", err.Error()),
- )
- }
- }
}
- nodes = append(nodes[:i], nodes[i+1:]...)
- i--
+ switch st {
+ case nodeIsLocal:
+ requirements.needLocalCopy = true
+
+ shortage--
+ case nodeIsUnderMaintenance:
+ shortage--
+ uncheckedCopies++
+
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+ zap.String("node", netmap.StringifyPublicKey(nodes[i])))
+ case nodeHoldsObject:
+ shortage--
+ case nodeDoesNotHoldObject:
+ case nodeStatusUnknown:
+ p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
+ zap.Stringer("object", addr),
+ zap.Error(err))
+ default:
+ panic("unreachable")
+ }
}
- p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
+ p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies)
}
-// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values
-//
-// consider remote nodes under maintenance as problem OK. Such
-// nodes MAY not respond with object, however, this is how we
-// prevent spam with new replicas.
-// However, additional copies should not be removed in this case,
-// because we can remove the only copy this way.
-func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
- checkedNodes.submitReplicaHolder(node)
- shortage--
- uncheckedCopies++
+func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) {
+ if p.netmapKeys.IsLocalKey(node.PublicKey()) {
+ return nodeIsLocal, nil
+ }
+ if node.Status().IsMaintenance() {
+ return nodeIsUnderMaintenance, nil
+ }
- p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
- zap.String("node", netmap.StringifyPublicKey(node)),
- )
- return shortage, uncheckedCopies
+ callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
+ _, err := p.remoteHeader(callCtx, node, addr, false)
+ cancel()
+
+ if err == nil {
+ return nodeHoldsObject, nil
+ }
+ if client.IsErrObjectNotFound(err) {
+ return nodeDoesNotHoldObject, nil
+ }
+ if client.IsErrNodeUnderMaintenance(err) {
+ return nodeIsUnderMaintenance, nil
+ }
+ return nodeStatusUnknown, err
}
func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
@@ -189,7 +187,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
) {
switch {
case shortage > 0:
- p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
+ p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected,
zap.Stringer("object", addr),
zap.Uint32("shortage", shortage),
)
@@ -205,7 +203,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
case uncheckedCopies > 0:
// If we have more copies than needed, but some of them are from the maintenance nodes,
// save the local copy.
- p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
+ p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
zap.Int("count", uncheckedCopies))
case uncheckedCopies == 0:
diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go
index d4c7ccbf9..69879c439 100644
--- a/pkg/services/policer/check_test.go
+++ b/pkg/services/policer/check_test.go
@@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) {
cache.SubmitSuccessfulReplication(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
- cache.submitReplicaCandidate(node)
+ cache.set(node, nodeDoesNotHoldObject)
require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject)
- cache.submitReplicaHolder(node)
+ cache.set(node, nodeHoldsObject)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 6d2c153c9..fbdeb3148 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -39,7 +39,7 @@ func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectco
// All of them must be stored on all of the container nodes.
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
objID := objInfo.Address.Object()
- nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy)
+ nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -59,7 +59,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -69,7 +69,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
}
func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
- nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
+ nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -91,7 +91,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
if res.removeLocal {
- p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
p.cbRedundantCopy(ctx, objInfo.Address)
}
return nil
@@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
var removeLocalChunk bool
requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
- if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
// current node is required node, we are happy
return ecChunkProcessResult{
validPlacement: true,
@@ -109,7 +109,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
}
if requiredNode.Status().IsMaintenance() {
// consider maintenance mode has object, but do not drop local copy
- p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
return ecChunkProcessResult{}
}
@@ -120,7 +120,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
if err == nil {
removeLocalChunk = true
} else if client.IsErrObjectNotFound(err) {
- p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
+ p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
task := replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -129,9 +129,9 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
} else if client.IsErrNodeUnderMaintenance(err) {
// consider maintenance mode has object, but do not drop local copy
- p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
} else {
- p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
+ p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
}
return ecChunkProcessResult{
@@ -146,13 +146,13 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I
requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
if len(requiredChunkIndexes) == 0 {
- p.log.Info(logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
+ p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
return true
}
err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
if err != nil {
- p.log.Error(logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if len(requiredChunkIndexes) == 0 {
@@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec
if uint32(i) == objInfo.ECInfo.Total {
break
}
- if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
}
}
@@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad
func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
var eiErr *objectSDK.ECInfoError
for _, n := range nodes {
- if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
continue
}
_, err := p.remoteHeader(ctx, n, parentAddress, true)
@@ -224,11 +224,11 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
var chunkID oid.ID
if err := chunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
- p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
+ p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return false
}
@@ -239,7 +239,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
for index, candidates := range required {
if len(candidates) == 0 {
- p.log.Error(logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
+ p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
return false
}
}
@@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
return
}
var err error
- if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
_, err = p.localHeader(ctx, parentAddress)
} else {
_, err = p.remoteHeader(ctx, n, parentAddress, true)
@@ -271,18 +271,20 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
resolved[ch.Index] = append(resolved[ch.Index], n)
var ecInfoChunkID oid.ID
if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return
}
if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
- p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
+ p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return
}
chunkIDs[ch.Index] = ecInfoChunkID
}
- } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
- p.log.Warn(logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
+ } else if client.IsErrObjectAlreadyRemoved(err) {
+ restore = false
+ } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+ p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
p.replicator.HandleReplicationTask(ctx, replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -299,7 +301,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
for i := range resolved {
found = append(found, i)
}
- p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
+ p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
return
}
p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
@@ -310,7 +312,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
) {
c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
if err != nil {
- p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
@@ -319,7 +321,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
key, err := p.keyStorage.GetKey(nil)
if err != nil {
- p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
required := make([]bool, len(parts))
@@ -329,7 +331,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
}
if err := c.ReconstructParts(parts, required, key); err != nil {
- p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
for idx, part := range parts {
@@ -341,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
pID, _ := part.ID()
addr.SetObject(pID)
targetNode := nodes[idx%len(nodes)]
- if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
p.replicator.HandleLocalPutTask(ctx, replicator.Task{
Addr: addr,
Obj: part,
@@ -369,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
var obj *objectSDK.Object
var err error
for _, node := range nodes {
- if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(node.PublicKey()) {
obj, err = p.localObject(egCtx, objID)
} else {
obj, err = p.remoteObject(egCtx, node, objID)
@@ -377,7 +379,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
if err == nil {
break
}
- p.log.Warn(logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
+ p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
}
if obj != nil {
parts[idx] = obj
@@ -386,7 +388,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
})
}
if err := errGroup.Wait(); err != nil {
- p.log.Error(logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
return nil
}
return parts
diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go
index e230153f9..c6980536b 100644
--- a/pkg/services/policer/ec_test.go
+++ b/pkg/services/policer/ec_test.go
@@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
@@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go
index cd47cb0fc..c2157de5d 100644
--- a/pkg/services/policer/nodecache.go
+++ b/pkg/services/policer/nodecache.go
@@ -8,6 +8,9 @@ const (
nodeNotProcessed nodeProcessStatus = iota
nodeDoesNotHoldObject
nodeHoldsObject
+ nodeStatusUnknown
+ nodeIsUnderMaintenance
+ nodeIsLocal
)
func (st nodeProcessStatus) Processed() bool {
@@ -15,37 +18,19 @@ func (st nodeProcessStatus) Processed() bool {
}
// nodeCache tracks Policer's check progress.
-type nodeCache map[uint64]bool
+type nodeCache map[uint64]nodeProcessStatus
func newNodeCache() nodeCache {
- return make(map[uint64]bool)
+ return make(map[uint64]nodeProcessStatus)
}
-func (n nodeCache) set(node netmap.NodeInfo, val bool) {
+func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
n[node.Hash()] = val
}
-// submits storage node as a candidate to store the object replica in case of
-// shortage.
-func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
- n.set(node, false)
-}
-
-// submits storage node as a current object replica holder.
-func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
- n.set(node, true)
-}
-
// processStatus returns current processing status of the storage node.
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
- switch val, ok := n[node.Hash()]; {
- case !ok:
- return nodeNotProcessed
- case val:
- return nodeHoldsObject
- default:
- return nodeDoesNotHoldObject
- }
+ return n[node.Hash()]
}
// SubmitSuccessfulReplication marks given storage node as a current object
@@ -53,5 +38,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
//
// SubmitSuccessfulReplication implements replicator.TaskResult.
func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
- n.submitReplicaHolder(node)
+ n.set(node, nodeHoldsObject)
}
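// ---- Reviewer example (not part of the diff) ----
// The nodeCache simplification relies on nodeNotProcessed being the iota
// zero value: a lookup for a node that was never set returns
// nodeNotProcessed automatically, so processStatus can be a bare map read.
// Minimal illustration:
package main

import "fmt"

type nodeProcessStatus int

const (
	nodeNotProcessed nodeProcessStatus = iota
	nodeDoesNotHoldObject
	nodeHoldsObject
)

func main() {
	cache := map[uint64]nodeProcessStatus{42: nodeHoldsObject}
	fmt.Println(cache[42]) // 2 (nodeHoldsObject)
	fmt.Println(cache[7])  // 0 (nodeNotProcessed) - missing keys yield the zero value
}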
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 336f7a0ab..5d59604c2 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -91,7 +91,7 @@ type cfg struct {
func defaultCfg() *cfg {
return &cfg{
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
batchSize: 10,
cacheSize: 1024, // 1024 * address size = 1024 * 64 B = 64 KiB
sleepDuration: 1 * time.Second,
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index 363c0b922..c91e7cc7c 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,13 +1,13 @@
package policer
import (
+ "fmt"
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
- "go.uber.org/zap"
)
type objectsInWork struct {
@@ -55,12 +55,8 @@ func New(opts ...Option) *Policer {
opts[i](c)
}
- c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
-
cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
- if err != nil {
- panic(err)
- }
+ assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize))
return &Policer{
cfg: c,
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index 4e17e98a8..049c33753 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
+ "slices"
"sort"
"testing"
"time"
@@ -36,10 +37,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
// Container source and bury function
buryCh := make(chan oid.Address)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -78,6 +79,7 @@ func TestProcessObject(t *testing.T) {
maintenanceNodes []int
wantRemoveRedundant bool
wantReplicateTo []int
+ headResult map[int]error
ecInfo *objectcore.ECInfo
}{
{
@@ -127,7 +129,7 @@ func TestProcessObject(t *testing.T) {
nodeCount: 2,
policy: `REP 2 REP 2`,
placement: [][]int{{0, 1}, {0, 1}},
- wantReplicateTo: []int{1, 1}, // is this actually good?
+ wantReplicateTo: []int{1},
},
{
desc: "lock object must be replicated to all nodes",
@@ -145,6 +147,14 @@ func TestProcessObject(t *testing.T) {
objHolders: []int{1},
maintenanceNodes: []int{2},
},
+ {
+ desc: "preserve local copy when node response with MAINTENANCE",
+ nodeCount: 3,
+ policy: `REP 2`,
+ placement: [][]int{{1, 2}},
+ objHolders: []int{1},
+ headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
+ },
{
desc: "lock object must be replicated to all EC nodes",
objType: objectSDK.TypeLock,
@@ -161,6 +171,14 @@ func TestProcessObject(t *testing.T) {
placement: [][]int{{0, 1, 2}},
wantReplicateTo: []int{1, 2},
},
+ {
+ desc: "do not remove local copy when MAINTENANCE status is cached",
+ objType: objectSDK.TypeRegular,
+ nodeCount: 3,
+ policy: `REP 1 REP 1`,
+ placement: [][]int{{1, 2}, {1, 0}},
+ headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
+ },
}
for i := range tests {
@@ -204,11 +222,14 @@ func TestProcessObject(t *testing.T) {
t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
return nil, errors.New("unexpected object head")
}
- for _, i := range ti.objHolders {
- if index == i {
- return nil, nil
+ if ti.headResult != nil {
+ if err, ok := ti.headResult[index]; ok {
+ return nil, err
}
}
+ if slices.Contains(ti.objHolders, index) {
+ return nil, nil
+ }
return nil, new(apistatus.ObjectNotFound)
}
@@ -217,14 +238,14 @@ func TestProcessObject(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(addr.Container()) {
return cnr, nil
}
t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container())
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -282,10 +303,10 @@ func TestProcessObjectError(t *testing.T) {
cnr := &container.Container{}
cnr.Value.Init()
source := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return nil, new(apistatus.ContainerNotFound)
},
}
@@ -330,10 +351,10 @@ func TestIteratorContract(t *testing.T) {
}
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -422,18 +443,22 @@ func (it *sliceKeySpaceIterator) Rewind() {
}
type containerSrc struct {
- get func(id cid.ID) (*container.Container, error)
- deletionInfo func(id cid.ID) (*container.DelInfo, error)
+ get func(ctx context.Context, id cid.ID) (*container.Container, error)
+ deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error)
}
-func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) }
+func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return f.get(ctx, id)
+}
-func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) }
+func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return f.deletionInfo(ctx, id)
+}
// placementBuilderFunc is a placement.Builder backed by a function
type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return f(c, o, p)
}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index a5ebb0010..635a5683b 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -7,17 +7,20 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (p *Policer) Run(ctx context.Context) {
p.shardPolicyWorker(ctx)
- p.log.Info(logs.PolicerRoutineStopped)
+ p.log.Info(ctx, logs.PolicerRoutineStopped)
}
func (p *Policer) shardPolicyWorker(ctx context.Context) {
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
for {
select {
case <-ctx.Done():
@@ -33,7 +36,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
continue
}
- p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
+ p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
}
skipMap := newSkipMap()
@@ -59,9 +62,9 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
if p.objsInWork.add(addr.Address) {
err := p.processObject(ctx, addr)
if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
- p.log.Error(logs.PolicerUnableToProcessObj,
+ p.log.Error(ctx, logs.PolicerUnableToProcessObj,
zap.Stringer("object", addr.Address),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
p.cache.Add(addr.Address, time.Now())
p.objsInWork.remove(addr.Address)
@@ -69,7 +72,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
}
})
if err != nil {
- p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
+ p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
}
}
}
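// ---- Reviewer sketch (not part of the diff) ----
// shardPolicyWorker now tags its context with an IO tag so lower layers
// can classify policer traffic. ContextWithIOTag presumably follows the
// standard context-value pattern sketched below; this is an illustration
// of the idea, not the actual frostfs-qos implementation.
package main

import (
	"context"
	"fmt"
)

type ioTagKey struct{}

// contextWithIOTag attaches a traffic-classification tag to the context.
func contextWithIOTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, ioTagKey{}, tag)
}

// ioTagFromContext retrieves the tag further down the call chain.
func ioTagFromContext(ctx context.Context) (string, bool) {
	tag, ok := ctx.Value(ioTagKey{}).(string)
	return tag, ok
}

func main() {
	ctx := contextWithIOTag(context.Background(), "policer")
	if tag, ok := ioTagFromContext(ctx); ok {
		fmt.Println("io tag:", tag) // io tag: policer
	}
}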
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 7e5c6e093..8c6f0df06 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
@@ -27,7 +26,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(logs.ReplicatorFinishWork,
+ p.log.Debug(ctx, logs.ReplicatorFinishWork,
zap.Uint32("amount of unfinished replicas", task.NumCopies),
)
}()
@@ -43,10 +42,9 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
var err error
task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
if err != nil {
- p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return
}
@@ -65,7 +63,6 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
log := p.log.With(
zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])),
zap.Stringer("object", task.Addr),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)
@@ -75,11 +72,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
cancel()
if err != nil {
- log.Error(logs.ReplicatorCouldNotReplicateObject,
- zap.String("error", err.Error()),
+ log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
+ zap.Error(err),
)
} else {
- log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
+ log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
task.NumCopies--
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index 7e7090237..216fe4919 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -3,12 +3,12 @@ package replicator
import (
"context"
"errors"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -22,7 +22,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask",
@@ -43,31 +43,24 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
if err == nil {
break
}
- var endpoints []string
- node.IterateNetworkEndpoints(func(s string) bool {
- endpoints = append(endpoints, s)
- return false
- })
- p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ endpoints := slices.Collect(node.NetworkEndpoints())
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
zap.Error(err),
- zap.Strings("endpoints", endpoints),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Strings("endpoints", endpoints))
}
if obj == nil {
- p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errFailedToGetObjectFromAnyNode),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(errFailedToGetObjectFromAnyNode))
return
}
err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
- p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
}
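// ---- Reviewer example (not part of the diff) ----
// NetworkEndpoints now returns a Go 1.23 iterator (iter.Seq[string]), so
// the manual IterateNetworkEndpoints callback collapses into a single
// slices.Collect call. Standalone illustration with made-up endpoints:
package main

import (
	"fmt"
	"iter"
	"slices"
)

func networkEndpoints() iter.Seq[string] {
	eps := []string{"/ip4/10.0.0.1/tcp/8080", "/ip4/10.0.0.2/tcp/8080"}
	return func(yield func(string) bool) {
		for _, e := range eps {
			if !yield(e) {
				return
			}
		}
	}
}

func main() {
	endpoints := slices.Collect(networkEndpoints())
	fmt.Println(endpoints) // [/ip4/10.0.0.1/tcp/8080 /ip4/10.0.0.2/tcp/8080]
}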
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index 537833516..bcad8471d 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -20,7 +19,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask",
@@ -31,18 +30,16 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
defer span.End()
if task.Obj == nil {
- p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errObjectNotDefined),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(errObjectNotDefined))
return
}
err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
- p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
}
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index f2f86daf0..a940cef37 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -7,7 +7,6 @@ import (
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
// Replicator represents the utility that replicates
@@ -45,8 +44,6 @@ func New(opts ...Option) *Replicator {
opts[i](c)
}
- c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))}
-
return &Replicator{
cfg: c,
}
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index e914119b4..f0591de71 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -33,10 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug(logs.ServingRequest,
- zap.String("component", "SessionService"),
- zap.String("request", "Create"),
- )
+ s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create"))
respBody, err := s.exec.Create(ctx, req.GetBody())
if err != nil {
diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go
index 411734ea1..60db97f90 100644
--- a/pkg/services/session/storage/persistent/options.go
+++ b/pkg/services/session/storage/persistent/options.go
@@ -19,7 +19,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- l: &logger.Logger{Logger: zap.L()},
+ l: logger.NewLoggerWrapper(zap.L()),
timeout: 100 * time.Millisecond,
}
}
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index 71711e371..132d62445 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -1,6 +1,7 @@
package persistent
import (
+ "context"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
@@ -63,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
// enable encryption if it
// was configured so
if cfg.privateKey != nil {
- rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
+ rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
cfg.privateKey.D.FillBytes(rawKey)
c, err := aes.NewCipher(rawKey)
@@ -105,7 +106,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
return err
})
if err != nil {
- s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
+ s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage,
zap.Error(err),
zap.Stringer("ownerID", ownerID),
zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -130,7 +131,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
if epochFromToken(v) <= epoch {
err = c.Delete()
if err != nil {
- s.l.Error(logs.PersistentCouldNotDeleteSToken,
+ s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken,
zap.String("token_id", hex.EncodeToString(k)),
)
}
@@ -141,7 +142,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
})
})
if err != nil {
- s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
+ s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens,
zap.Uint64("epoch", epoch),
)
}
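
Dropping the explicit .Curve hop compiles because ecdsa.PrivateKey embeds PublicKey, which embeds elliptic.Curve, so Params() is promoted all the way up. The sizing arithmetic is worth spelling out: (BitLen+7)/8 rounds the curve order up to whole bytes, and FillBytes left-pads D so a numerically small scalar still yields a fixed-width key. A self-contained sketch using only the standard library (not the node's config types):

package main

import (
	"crypto/aes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Same expression as the hunk above: ceil(BitLen/8) bytes, left-padded.
	rawKey := make([]byte, (priv.Params().N.BitLen()+7)/8)
	priv.D.FillBytes(rawKey)
	if _, err := aes.NewCipher(rawKey); err != nil {
		panic(err) // would fire only if the curve size were not 16/24/32 bytes
	}
	fmt.Println("derived key length:", len(rawKey)) // 32 for P-256, i.e. AES-256
}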
diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go
index d531b25cb..423e579d7 100644
--- a/pkg/services/session/storage/temporary/executor.go
+++ b/pkg/services/session/storage/temporary/executor.go
@@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody)
s.mtx.Lock()
s.tokens[key{
tokenID: base58.Encode(uidBytes),
- ownerID: base58.Encode(id.WalletBytes()),
+ ownerID: id.EncodeToString(),
}] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration())
s.mtx.Unlock()
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index 9ae9db9dc..c9da6b842 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -41,7 +41,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken
s.mtx.RLock()
t := s.tokens[key{
tokenID: base58.Encode(tokenID),
- ownerID: base58.Encode(ownerID.WalletBytes()),
+ ownerID: ownerID.EncodeToString(),
}]
s.mtx.RUnlock()
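
Switching the map key from base58.Encode(id.WalletBytes()) to id.EncodeToString() is safe only because Create and Get change together: the cache key must be derived identically on both paths or lookups silently miss. A sketch of that invariant, with owner standing in for the SDK's user.ID:

package temporary

import "github.com/mr-tron/base58"

type owner interface{ EncodeToString() string }

type key struct {
	tokenID string
	ownerID string
}

// makeKey centralizes the derivation so writer and reader cannot drift;
// the two hunks above are the manual equivalent of this single helper.
func makeKey(tokenID []byte, o owner) key {
	return key{
		tokenID: base58.Encode(tokenID),
		ownerID: o.EncodeToString(),
	}
}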
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index 69cf59405..58757ff6d 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -22,7 +22,7 @@ import (
)
func (s *Service) newAPERequest(ctx context.Context, namespace string,
- cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) (aperequest.Request, error) {
schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
if err != nil {
@@ -36,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()),
nativeschema.PropertyKeyActorRole: schemaRole,
}
- reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey)
+ reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey)
if err != nil {
return aperequest.Request{}, err
}
@@ -53,15 +53,19 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
}
+ resProps := map[string]string{
+ nativeschema.ProperyKeyTreeID: treeID,
+ }
+
return aperequest.NewRequest(
schemaMethod,
- aperequest.NewResource(resourceName, make(map[string]string)),
+ aperequest.NewResource(resourceName, resProps),
reqProps,
), nil
}
func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
- container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) error {
namespace := ""
cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
@@ -69,28 +73,27 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
namespace = cntNamespace
}
- request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey)
+ request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey)
if err != nil {
return fmt.Errorf("failed to create ape request: %w", err)
}
- return s.apeChecker.CheckAPE(checkercore.CheckPrm{
+ return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{
Request: request,
Namespace: namespace,
Container: cid,
ContainerOwner: container.Value.Owner(),
PublicKey: publicKey,
BearerToken: bt,
- SoftAPECheck: false,
})
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
+func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey)
if err != nil {
return reqProps, err
}
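
Threading treeID into newAPERequest and attaching it as a resource property is what lets policy chains discriminate between trees in the same container: the engine matches rule conditions against exactly this key/value set. A toy evaluator showing the mechanics (the real matching lives in the policy engine; names here are illustrative):

package main

import "fmt"

type condition struct{ key, want string }

// matches mirrors a CondStringEquals check of KindResource conditions
// against the resource properties built in newAPERequest.
func matches(resProps map[string]string, conds []condition) bool {
	for _, c := range conds {
		if resProps[c.key] != c.want {
			return false
		}
	}
	return true
}

func main() {
	resProps := map[string]string{"TreeID": "version"} // real key: nativeschema.ProperyKeyTreeID
	fmt.Println(matches(resProps, []condition{{key: "TreeID", want: "version"}})) // true
	fmt.Println(matches(resProps, []condition{{key: "TreeID", want: "system"}}))  // false
}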
diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go
index 3f94925b5..7b209fd47 100644
--- a/pkg/services/tree/ape_test.go
+++ b/pkg/services/tree/ape_test.go
@@ -37,7 +37,7 @@ type frostfsIDProviderMock struct {
subjectsExtended map[util.Uint160]*client.SubjectExtended
}
-func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) {
+func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
v, ok := f.subjects[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -45,7 +45,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e
return v, nil
}
-func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
v, ok := f.subjectsExtended[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -107,6 +107,45 @@ func TestCheckAPE(t *testing.T) {
cid := cid.ID{}
_ = cid.DecodeString(containerID)
+ t.Run("treeID rule", func(t *testing.T) {
+ los := inmemory.NewInmemoryLocalStorage()
+ mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+ fid := newFrostfsIDProviderMock(t)
+ s := Service{
+ cfg: cfg{
+ frostfsidSubjectProvider: fid,
+ },
+ apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+ }
+
+ mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.QuotaLimitReached,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.ProperyKeyTreeID,
+ Value: versionTreeID,
+ },
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey())
+
+ var chErr *checkercore.ChainRouterError
+ require.ErrorAs(t, err, &chErr)
+ require.Equal(t, chain.QuotaLimitReached, chErr.Status())
+ })
+
t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
los := inmemory.NewInmemoryLocalStorage()
mcs := inmemory.NewInmemoryMorphRuleChainStorage()
@@ -152,7 +191,7 @@ func TestCheckAPE(t *testing.T) {
MatchType: chain.MatchTypeFirstMatch,
})
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
require.NoError(t, err)
})
@@ -201,7 +240,7 @@ func TestCheckAPE(t *testing.T) {
MatchType: chain.MatchTypeFirstMatch,
})
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
require.NoError(t, err)
})
}
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index ac80d0e4c..a11700771 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -10,12 +10,9 @@ import (
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
- tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials/insecure"
)
type clientCache struct {
@@ -51,7 +48,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
c.Lock()
- ccInt, ok := c.LRU.Get(netmapAddr)
+ ccInt, ok := c.Get(netmapAddr)
c.Unlock()
if ok {
@@ -69,14 +66,19 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- cc, err := c.dialTreeService(ctx, netmapAddr)
+ var netAddr network.Address
+ if err := netAddr.FromString(netmapAddr); err != nil {
+ return nil, err
+ }
+
+ cc, err := dialTreeService(ctx, netAddr, c.key, c.ds)
lastTry := time.Now()
c.Lock()
if err != nil {
- c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+ c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
} else {
- c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+ c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
}
c.Unlock()
@@ -86,48 +88,3 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
-
-func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
- var netAddr network.Address
- if err := netAddr.FromString(netmapAddr); err != nil {
- return nil, err
- }
-
- opts := []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInteceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- metrics.NewStreamClientInterceptor(),
- tracing.NewStreamClientInterceptor(),
- ),
- grpc.WithContextDialer(c.ds.GrpcContextDialer()),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- }
-
- if !netAddr.IsTLSEnabled() {
- opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }
-
- req := &HealthcheckRequest{
- Body: &HealthcheckRequest_Body{},
- }
- if err := SignMessage(req, c.key); err != nil {
- return nil, err
- }
-
- cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
- if err != nil {
- return nil, err
- }
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- defer cancel()
- // perform some request to check connection
- if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
- _ = cc.Close()
- return nil, err
- }
- return cc, nil
-}
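
With dialTreeService lifted out into a free function, get() is left with pure cache policy: both successful and failed dials are recorded with a timestamp, so a recently failed endpoint is not re-dialed on every request. The policy in isolation, with the LRU reduced to a plain map and the gRPC connection left abstract:

package main

import (
	"errors"
	"fmt"
	"time"
)

type cacheItem struct {
	conn    any // *grpc.ClientConn in the real code
	lastTry time.Time
}

type cache struct{ m map[string]cacheItem }

// get returns a cached connection, refuses to re-dial endpoints that
// failed within retryAfter (negative caching), and records either outcome.
func (c *cache) get(addr string, retryAfter time.Duration, dial func() (any, error)) (any, error) {
	if it, ok := c.m[addr]; ok {
		if it.conn != nil {
			return it.conn, nil
		}
		if time.Since(it.lastTry) < retryAfter {
			return nil, fmt.Errorf("endpoint %s failed recently", addr)
		}
	}
	conn, err := dial()
	if err != nil {
		c.m[addr] = cacheItem{conn: nil, lastTry: time.Now()}
		return nil, err
	}
	c.m[addr] = cacheItem{conn: conn, lastTry: time.Now()}
	return conn, nil
}

func main() {
	c := &cache{m: map[string]cacheItem{}}
	_, err := c.get("node1:8080", time.Minute, func() (any, error) { return nil, errors.New("refused") })
	fmt.Println(err)
	_, err = c.get("node1:8080", time.Minute, func() (any, error) { return "conn", nil })
	fmt.Println(err) // still refused: inside the negative-cache window
}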
diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go
index 435257550..c641a21a2 100644
--- a/pkg/services/tree/container.go
+++ b/pkg/services/tree/container.go
@@ -2,6 +2,7 @@ package tree
import (
"bytes"
+ "context"
"crypto/sha256"
"fmt"
"sync"
@@ -32,13 +33,13 @@ type containerCacheItem struct {
const defaultContainerCacheSize = 10
// getContainerNodes returns nodes in the container and a position of local key in the list.
-func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
- nm, err := s.nmSource.GetNetMap(0)
+func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
+ nm, err := s.nmSource.GetNetMap(ctx, 0)
if err != nil {
return nil, -1, fmt.Errorf("can't get netmap: %w", err)
}
- cnr, err := s.cnrSource.Get(cid)
+ cnr, err := s.cnrSource.Get(ctx, cid)
if err != nil {
return nil, -1, fmt.Errorf("can't get container: %w", err)
}
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index 95bdda34b..e7a13827e 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -131,7 +131,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) {
t.Run("boltdb forest", func(t *testing.T) {
p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
require.NoError(t, p.Open(context.Background(), 0o644))
- require.NoError(t, p.Init())
+ require.NoError(t, p.Init(context.Background()))
testGetSubTreeOrderAsc(t, p)
})
}
diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go
index 0f0e4ee57..07503f8c3 100644
--- a/pkg/services/tree/metrics.go
+++ b/pkg/services/tree/metrics.go
@@ -6,6 +6,7 @@ type MetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
+ AddOperation(string, string)
}
type defaultMetricsRegister struct{}
@@ -13,3 +14,4 @@ type defaultMetricsRegister struct{}
func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {}
+func (defaultMetricsRegister) AddOperation(string, string) {}
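
AddOperation extends the register with a per-RPC counter keyed by operation name and the request's IO tag; the default implementation stays a no-op so existing embedders keep working. An illustrative counting implementation (the production one would presumably back this with a Prometheus counter vector labeled operation/io_tag):

package metrics

import "sync"

type countingRegister struct {
	mtx sync.Mutex
	ops map[[2]string]uint64
}

// AddOperation tallies calls per (operation, io_tag) pair, matching the
// defers added in service.go such as AddOperation("Add", qos.IOTagFromContext(ctx)).
func (r *countingRegister) AddOperation(op, ioTag string) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if r.ops == nil {
		r.ops = make(map[[2]string]uint64)
	}
	r.ops[[2]string{op, ioTag}]++
}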
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 1633ae557..56cbcc081 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -1,7 +1,9 @@
package tree
import (
+ "context"
"crypto/ecdsa"
+ "sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
@@ -18,12 +20,12 @@ import (
type ContainerSource interface {
container.Source
- DeletionInfo(cid.ID) (*container.DelInfo, error)
+ DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error)
// List must return list of all the containers in the FrostFS network
// at the moment of a call and any error that does not allow fetching
// container information.
- List() ([]cid.ID, error)
+ List(ctx context.Context) ([]cid.ID, error)
}
type cfg struct {
@@ -40,7 +42,8 @@ type cfg struct {
replicatorWorkerCount int
replicatorTimeout time.Duration
containerCacheSize int
- authorizedKeys [][]byte
+ authorizedKeys atomic.Pointer[[][]byte]
+ syncBatchSize int
localOverrideStorage policyengine.LocalOverrideStorage
morphChainStorage policyengine.MorphRuleChainStorageReader
@@ -113,6 +116,12 @@ func WithReplicationWorkerCount(n int) Option {
}
}
+func WithSyncBatchSize(n int) Option {
+ return func(c *cfg) {
+ c.syncBatchSize = n
+ }
+}
+
func WithContainerCacheSize(n int) Option {
return func(c *cfg) {
if n > 0 {
@@ -139,10 +148,7 @@ func WithMetrics(v MetricsRegister) Option {
// keys that have rights to use Tree service.
func WithAuthorizedKeys(keys keys.PublicKeys) Option {
return func(c *cfg) {
- c.authorizedKeys = nil
- for _, key := range keys {
- c.authorizedKeys = append(c.authorizedKeys, key.Bytes())
- }
+ c.authorizedKeys.Store(fromPublicKeys(keys))
}
}
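
Moving authorizedKeys behind atomic.Pointer[[][]byte] makes reloads lock-free: readers load an immutable snapshot while a reload publishes a freshly built slice, so the hot request path needs no mutex. A minimal sketch, assuming the snapshot is seeded before first use (the service does exactly that with Store(&[][]byte{}) in New):

package main

import (
	"bytes"
	"fmt"
	"sync/atomic"
)

type keySet struct{ p atomic.Pointer[[][]byte] }

// Reload publishes a new snapshot; concurrent readers keep whichever
// snapshot they already loaded, never a half-updated slice.
func (s *keySet) Reload(keys [][]byte) { s.p.Store(&keys) }

func (s *keySet) Contains(k []byte) bool {
	for _, cur := range *s.p.Load() {
		if bytes.Equal(cur, k) {
			return true
		}
	}
	return false
}

func main() {
	var s keySet
	s.Reload([][]byte{}) // seed, as New does with authorizedKeys.Store(&[][]byte{})
	fmt.Println(s.Contains([]byte{0x02})) // false
	s.Reload([][]byte{{0x02}})
	fmt.Println(s.Contains([]byte{0x02})) // true
}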
diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go
new file mode 100644
index 000000000..8f21686df
--- /dev/null
+++ b/pkg/services/tree/qos.go
@@ -0,0 +1,101 @@
+package tree
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+)
+
+var _ TreeServiceServer = (*ioTagAdjust)(nil)
+
+type AdjustIOTag interface {
+ AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
+type ioTagAdjust struct {
+ s TreeServiceServer
+ a AdjustIOTag
+}
+
+func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer {
+ return &ioTagAdjust{
+ s: s,
+ a: a,
+ }
+}
+
+func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Add(ctx, req)
+}
+
+func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.AddByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Apply(ctx, req)
+}
+
+func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.GetNodeByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
+ ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+ return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{
+ sender: srv,
+ ServerStream: srv,
+ ctxF: func() context.Context { return ctx },
+ })
+}
+
+func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
+ ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+ return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{
+ sender: srv,
+ ServerStream: srv,
+ ctxF: func() context.Context { return ctx },
+ })
+}
+
+func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Healthcheck(ctx, req)
+}
+
+func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Move(ctx, req)
+}
+
+func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Remove(ctx, req)
+}
+
+func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.TreeList(ctx, req)
+}
+
+type qosSend[T any] interface {
+ Send(T) error
+}
+
+type qosServerWrapper[T any] struct {
+ grpc.ServerStream
+ sender qosSend[T]
+ ctxF func() context.Context
+}
+
+func (w *qosServerWrapper[T]) Send(resp T) error {
+ return w.sender.Send(resp)
+}
+
+func (w *qosServerWrapper[T]) Context() context.Context {
+ return w.ctxF()
+}
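
The unary wrappers simply swap ctx before delegating, but the two streaming RPCs need qosServerWrapper because a streaming handler learns its context from the stream itself: the wrapper embeds the original grpc.ServerStream and overrides only Context(). The trick in isolation, with simplified stand-in interfaces:

package main

import (
	"context"
	"fmt"
)

type stream interface {
	Context() context.Context
	Send(string) error
}

type baseStream struct{ ctx context.Context }

func (b *baseStream) Context() context.Context { return b.ctx }
func (b *baseStream) Send(s string) error      { fmt.Println("send:", s); return nil }

// ctxOverride forwards everything to the embedded stream except Context(),
// which is exactly the shape of qosServerWrapper above.
type ctxOverride struct {
	stream
	ctx context.Context
}

func (w *ctxOverride) Context() context.Context { return w.ctx }

type tagKey struct{}

func main() {
	inner := &baseStream{ctx: context.Background()}
	tagged := context.WithValue(inner.Context(), tagKey{}, "tree-sync")
	var s stream = &ctxOverride{stream: inner, ctx: tagged}
	_ = s.Send("resp")                       // delegated to the inner stream
	fmt.Println(s.Context().Value(tagKey{})) // tree-sync
}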
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 5bde3ae38..647f8cb30 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -6,7 +6,6 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
@@ -20,8 +19,8 @@ var errNoSuitableNode = errors.New("no node was found to execute the request")
func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) {
var resp *Resp
var outErr error
- err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = callback(c, ctx, req)
+ err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ resp, outErr = callback(c, fCtx, req)
return true
})
if err != nil {
@@ -32,7 +31,7 @@ func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapS
// forEachNode executes callback for each node in the container until true is returned.
// Returns errNoSuitableNode if there was no successful attempt to dial any node.
-func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error {
+func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error {
for _, n := range cntNodes {
if bytes.Equal(n.PublicKey(), s.rawPub) {
return nil
@@ -42,25 +41,15 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
var called bool
for _, n := range cntNodes {
var stop bool
- n.IterateNetworkEndpoints(func(endpoint string) bool {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
- trace.WithAttributes(
- attribute.String("endpoint", endpoint),
- ))
- defer span.End()
-
- c, err := s.cache.get(ctx, endpoint)
- if err != nil {
- return false
+ for endpoint := range n.NetworkEndpoints() {
+ stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool {
+ called = true
+ return f(fCtx, c)
+ })
+ if called {
+ break
}
-
- s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
-
- called = true
- stop = f(c)
- return true
- })
+ }
if stop {
return nil
}
@@ -70,3 +59,19 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
}
return nil
}
+
+func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
+ trace.WithAttributes(
+ attribute.String("endpoint", endpoint),
+ ))
+ defer span.End()
+
+ c, err := s.cache.get(ctx, endpoint)
+ if err != nil {
+ return false
+ }
+
+ s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
+ return f(ctx, c)
+}
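
Replacing IterateNetworkEndpoints(func(...) bool) with a range over NetworkEndpoints() leans on Go 1.23 iterators: a method returning func(yield func(string) bool) can be consumed with a plain for-range, so early exit becomes break instead of sentinel booleans threaded through a closure. A standalone sketch of the shape (node here is not the SDK type):

package main

import "fmt"

type node struct{ endpoints []string }

// NetworkEndpoints returns a single-value iterator; range stops pulling
// as soon as the loop body breaks, because yield then returns false.
func (n node) NetworkEndpoints() func(yield func(string) bool) {
	return func(yield func(string) bool) {
		for _, e := range n.endpoints {
			if !yield(e) {
				return
			}
		}
	}
}

func main() {
	n := node{endpoints: []string{"addr1", "addr2", "addr3"}}
	for ep := range n.NetworkEndpoints() {
		fmt.Println(ep)
		if ep == "addr2" {
			break // addr3 is never yielded
		}
	}
}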
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index 95c8f8013..ee40884eb 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -40,6 +39,7 @@ const (
defaultReplicatorCapacity = 64
defaultReplicatorWorkerCount = 64
defaultReplicatorSendTimeout = time.Second * 5
+ defaultSyncBatchSize = 1000
)
func (s *Service) localReplicationWorker(ctx context.Context) {
@@ -57,8 +57,8 @@ func (s *Service) localReplicationWorker(ctx context.Context) {
err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
if err != nil {
- s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
- zap.String("err", err.Error()))
+ s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
+ zap.Error(err))
}
span.End()
}
@@ -89,41 +89,23 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
var lastErr error
var lastAddr string
- n.IterateNetworkEndpoints(func(addr string) bool {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
- trace.WithAttributes(
- attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
- attribute.String("address", addr),
- ),
- )
- defer span.End()
-
+ for addr := range n.NetworkEndpoints() {
lastAddr = addr
-
- c, err := s.cache.get(ctx, addr)
- if err != nil {
- lastErr = fmt.Errorf("can't create client: %w", err)
- return false
+ lastErr = s.apply(ctx, n, addr, req)
+ if lastErr == nil {
+ break
}
-
- ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
- _, lastErr = c.Apply(ctx, req)
- cancel()
-
- return lastErr == nil
- })
+ }
if lastErr != nil {
if errors.Is(lastErr, errRecentlyFailed) {
- s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
- zap.String("last_error", lastErr.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()))
} else {
- s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
+ s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode,
zap.String("last_error", lastErr.Error()),
zap.String("address", lastAddr),
- zap.String("key", hex.EncodeToString(n.PublicKey())),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("key", hex.EncodeToString(n.PublicKey())))
}
s.metrics.AddReplicateTaskDuration(time.Since(start), false)
return lastErr
@@ -132,6 +114,26 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
return nil
}
+func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
+ attribute.String("address", addr),
+ ),
+ )
+ defer span.End()
+
+ c, err := s.cache.get(ctx, addr)
+ if err != nil {
+ return fmt.Errorf("can't create client: %w", err)
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
+ _, err = c.Apply(ctx, req)
+ cancel()
+ return err
+}
+
func (s *Service) replicateLoop(ctx context.Context) {
for range s.replicatorWorkerCount {
go s.replicationWorker(ctx)
@@ -151,10 +153,10 @@ func (s *Service) replicateLoop(ctx context.Context) {
return
case op := <-s.replicateCh:
start := time.Now()
- err := s.replicate(op)
+ err := s.replicate(ctx, op)
if err != nil {
- s.log.Error(logs.TreeErrorDuringReplication,
- zap.String("err", err.Error()),
+ s.log.Error(ctx, logs.TreeErrorDuringReplication,
+ zap.Error(err),
zap.Stringer("cid", op.cid),
zap.String("treeID", op.treeID))
}
@@ -163,14 +165,14 @@ func (s *Service) replicateLoop(ctx context.Context) {
}
}
-func (s *Service) replicate(op movePair) error {
+func (s *Service) replicate(ctx context.Context, op movePair) error {
req := newApplyRequest(&op)
err := SignMessage(req, s.key)
if err != nil {
return fmt.Errorf("can't sign data: %w", err)
}
- nodes, localIndex, err := s.getContainerNodes(op.cid)
+ nodes, localIndex, err := s.getContainerNodes(ctx, op.cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -204,7 +206,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
TreeId: op.treeID,
Operation: &LogMove{
ParentId: op.op.Parent,
- Meta: op.op.Meta.Bytes(),
+ Meta: op.op.Bytes(),
ChildId: op.op.Child,
},
},
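
Factoring apply() out gives each endpoint attempt its own tracing span and its own bounded sub-context, while the caller's loop keeps only the stop-on-first-success policy. The retry skeleton on its own, with attempt standing in for the span-plus-Apply body (names here are illustrative, not the node's API):

package retry

import (
	"context"
	"time"
)

// tryEndpoints attempts each address in order, giving every attempt an
// independent timeout, and returns nil on the first success or the last
// error once all endpoints are exhausted.
func tryEndpoints(ctx context.Context, endpoints []string, timeout time.Duration,
	attempt func(context.Context, string) error,
) error {
	var lastErr error
	for _, addr := range endpoints {
		attemptCtx, cancel := context.WithTimeout(ctx, timeout)
		lastErr = attempt(attemptCtx, addr)
		cancel()
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}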
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 8097d545c..3994d6973 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -9,12 +9,15 @@ import (
"sync"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
@@ -55,14 +58,16 @@ func New(opts ...Option) *Service {
s.replicatorChannelCapacity = defaultReplicatorCapacity
s.replicatorWorkerCount = defaultReplicatorWorkerCount
s.replicatorTimeout = defaultReplicatorSendTimeout
+ s.syncBatchSize = defaultSyncBatchSize
s.metrics = defaultMetricsRegister{}
+ s.authorizedKeys.Store(&[][]byte{})
for i := range opts {
opts[i](&s.cfg)
}
if s.log == nil {
- s.log = &logger.Logger{Logger: zap.NewNop()}
+ s.log = logger.NewLoggerWrapper(zap.NewNop())
}
s.cache.init(s.key, s.ds)
@@ -82,6 +87,7 @@ func New(opts ...Option) *Service {
// Start starts the service.
func (s *Service) Start(ctx context.Context) {
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
go s.replicateLoop(ctx)
go s.syncLoop(ctx)
@@ -101,6 +107,7 @@ func (s *Service) Shutdown() {
}
func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
+ defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -112,12 +119,12 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
@@ -144,6 +151,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
}
func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
+ defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -155,12 +163,12 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
@@ -199,6 +207,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
+ defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -210,12 +219,12 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectDelete)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
@@ -243,6 +252,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
// Move applies client operation to the specified tree and pushes in queue
// for replication on other nodes.
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
+ defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -254,12 +264,12 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
@@ -286,6 +296,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
}
func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
+ defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -297,12 +308,12 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
@@ -336,14 +347,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
} else {
var metaValue []KeyValue
for _, kv := range m.Items {
- for _, attr := range b.GetAttributes() {
- if kv.Key == attr {
- metaValue = append(metaValue, KeyValue{
- Key: kv.Key,
- Value: kv.Value,
- })
- break
- }
+ if slices.Contains(b.GetAttributes(), kv.Key) {
+ metaValue = append(metaValue, KeyValue{
+ Key: kv.Key,
+ Value: kv.Value,
+ })
}
}
x.Meta = metaValue
@@ -359,6 +367,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
+ defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -370,20 +379,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return err
}
- err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(srv.Context(), cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetSubTreeClient
var outErr error
- err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
- cli, outErr = c.GetSubTree(srv.Context(), req)
+ err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ cli, outErr = c.GetSubTree(fCtx, req)
return true
})
if err != nil {
@@ -405,7 +414,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
type stackItem struct {
values []pilorama.MultiNodeInfo
parent pilorama.MultiNode
- last *string
+ last *pilorama.Cursor
}
func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
@@ -429,10 +438,8 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
}
if ms == nil {
ms = m.Items
- } else {
- if len(m.Items) != 1 {
- return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
- }
+ } else if len(m.Items) != 1 {
+ return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
}
ts = append(ts, m.Time)
ps = append(ps, p)
@@ -456,14 +463,13 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
break
}
- nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
+ var err error
+ item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
if err != nil {
return err
}
- item.values = nodes
- item.last = last
- if len(nodes) == 0 {
+ if len(item.values) == 0 {
stack = stack[:len(stack)-1]
continue
}
@@ -585,7 +591,8 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
}
// Apply locally applies operation from the remote node to the tree.
-func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx))
err := verifyMessage(req)
if err != nil {
return nil, err
@@ -598,7 +605,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
key := req.GetSignature().GetKey()
- _, pos, _, err := s.getContainerInfo(cid, key)
+ _, pos, _, err := s.getContainerInfo(ctx, cid, key)
if err != nil {
return nil, err
}
@@ -629,6 +636,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
}
func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
+ defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -640,15 +648,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
return err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(srv.Context(), cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetOpLogClient
var outErr error
- err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
- cli, outErr = c.GetOpLog(srv.Context(), req)
+ err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ cli, outErr = c.GetOpLog(fCtx, req)
return true
})
if err != nil {
@@ -679,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
Body: &GetOpLogResponse_Body{
Operation: &LogMove{
ParentId: lm.Parent,
- Meta: lm.Meta.Bytes(),
+ Meta: lm.Bytes(),
ChildId: lm.Child,
},
},
@@ -693,6 +701,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
}
func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
+ defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -712,7 +721,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
@@ -754,8 +763,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue {
// getContainerInfo returns the list of container nodes, position in the container for the node
// with pub key and total amount of nodes in all replicas.
-func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
- cntNodes, _, err := s.getContainerNodes(cid)
+func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
+ cntNodes, _, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, 0, 0, err
}
@@ -775,3 +784,15 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec
return new(HealthcheckResponse), nil
}
+
+func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) {
+ s.authorizedKeys.Store(fromPublicKeys(newKeys))
+}
+
+func fromPublicKeys(keys keys.PublicKeys) *[][]byte {
+ buff := make([][]byte, len(keys))
+ for i, k := range keys {
+ buff[i] = k.Bytes()
+ }
+ return &buff
+}
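
Start() now stamps its root context with the tree-sync IO tag, so everything spawned from replicateLoop and syncLoop is classified as internal traffic, while each RPC handler records its operation with the tag taken from the incoming context. A sketch of what such tagging helpers could look like; the real ones live in frostfs-qos and internal/qos, and this key type is hypothetical:

package qos

import "context"

type ioTagKey struct{}

// ContextWithIOTag attaches a tag that every derived context inherits.
func ContextWithIOTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, ioTagKey{}, tag)
}

// IOTagFromContext returns the tag, or "" when the caller set none,
// which is what the AddOperation defers above would then record.
func IOTagFromContext(ctx context.Context) string {
	tag, _ := ctx.Value(ioTagKey{}).(string)
	return tag
}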
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 4fd4a7e1e..8221a4546 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -9,8 +9,10 @@ import (
"fmt"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@@ -36,7 +38,7 @@ var (
// Operation must be one of:
// - 1. ObjectPut;
// - 2. ObjectGet.
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error {
+func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error {
err := verifyMessage(req)
if err != nil {
return err
@@ -47,7 +49,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return err
}
- cnr, err := s.cnrSource.Get(cid)
+ cnr, err := s.cnrSource.Get(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container %s: %w", cid, err)
}
@@ -62,7 +64,22 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get request role: %w", err)
}
- return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
+ if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil {
+ return apeErr(err)
+ }
+ return nil
+}
+
+func apeErr(err error) error {
+ var chRouterErr *checkercore.ChainRouterError
+ if !errors.As(err, &chRouterErr) {
+ errServerInternal := &apistatus.ServerInternal{}
+ apistatus.WriteInternalServerErr(errServerInternal, err)
+ return errServerInternal
+ }
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(err.Error())
+ return errAccessDenied
}
// Returns true iff the operation is read-only and request was signed
@@ -78,8 +95,8 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
}
key := sign.GetKey()
- for i := range s.authorizedKeys {
- if bytes.Equal(s.authorizedKeys[i], key) {
+ for _, currentKey := range *s.authorizedKeys.Load() {
+ if bytes.Equal(currentKey, key) {
return true, nil
}
}
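
apeErr translates the checker's result into API-level statuses: a ChainRouterError means the policy engine actually denied the request, so the client sees ObjectAccessDenied with a reason, while any other failure is surfaced as ServerInternal. The classification rule with stdlib stand-ins for the status types:

package main

import (
	"errors"
	"fmt"
)

type chainRouterError struct{ status string }

func (e *chainRouterError) Error() string { return "denied by rule: " + e.status }

// classify mirrors apeErr: errors.As unwraps wrapped errors too, so a
// denial wrapped in fmt.Errorf("...: %w", err) is still detected.
func classify(err error) string {
	var chErr *chainRouterError
	if errors.As(err, &chErr) {
		return "access denied: " + chErr.Error()
	}
	return "internal server error: " + err.Error()
}

func main() {
	fmt.Println(classify(&chainRouterError{status: "QuotaLimitReached"}))
	fmt.Println(classify(fmt.Errorf("check failed: %w", errors.New("db unavailable"))))
}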
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index 7bc5002dc..8815c227f 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -31,6 +31,8 @@ import (
"github.com/stretchr/testify/require"
)
+const versionTreeID = "version"
+
type dummyNetmapSource struct {
netmap.Source
}
@@ -39,7 +41,7 @@ type dummySubjectProvider struct {
subjects map[util.Uint160]client.SubjectExtended
}
-func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) {
+func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
res := s.subjects[addr]
return &client.Subject{
PrimaryKey: res.PrimaryKey,
@@ -50,7 +52,7 @@ func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, er
}, nil
}
-func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
+func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
res := s.subjects[addr]
return &res, nil
}
@@ -65,7 +67,7 @@ func (s dummyEpochSource) CurrentEpoch() uint64 {
type dummyContainerSource map[string]*containercore.Container
-func (s dummyContainerSource) List() ([]cid.ID, error) {
+func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
res := make([]cid.ID, 0, len(s))
var cnr cid.ID
@@ -81,7 +83,7 @@ func (s dummyContainerSource) List() ([]cid.ID, error) {
return res, nil
}
-func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
+func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) {
cnt, ok := s[id.String()]
if !ok {
return nil, errors.New("container not found")
@@ -89,7 +91,7 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
return cnt, nil
}
-func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) {
+func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) {
return &containercore.DelInfo{}, nil
}
@@ -150,6 +152,7 @@ func TestMessageSign(t *testing.T) {
apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
+ s.cfg.authorizedKeys.Store(&[][]byte{})
rawCID1 := make([]byte, sha256.Size)
cid1.Encode(rawCID1)
@@ -168,26 +171,26 @@ func TestMessageSign(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRW)
t.Run("missing signature, no panic", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
t.Run("invalid CID", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
cnr.Value.SetBasicACL(acl.Private)
t.Run("extension disabled", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
t.Run("invalid key", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
})
t.Run("bearer", func(t *testing.T) {
@@ -200,7 +203,7 @@ func TestMessageSign(t *testing.T) {
t.Run("invalid bearer", func(t *testing.T) {
req.Body.BearerToken = []byte{0xFF}
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer CID", func(t *testing.T) {
@@ -209,7 +212,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer owner", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -217,7 +220,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer signature", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -229,20 +232,112 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bv2.StableMarshal(nil)
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ })
+
+ t.Run("omit override within bt", func(t *testing.T) {
+ t.Run("personated", func(t *testing.T) {
+ bt := testBearerTokenNoOverride()
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override")
+ })
+
+ t.Run("impersonated", func(t *testing.T) {
+ bt := testBearerTokenNoOverride()
+ bt.SetImpersonate(true)
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ })
+ })
+
+ t.Run("invalid override within bearer token", func(t *testing.T) {
+ t.Run("personated", func(t *testing.T) {
+ bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
+ })
+
+ t.Run("impersonated", func(t *testing.T) {
+ bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
+ bt.SetImpersonate(true)
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
+ })
})
t.Run("impersonate", func(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRWExtended)
var bt bearer.Token
+ bt.SetExp(10)
+ bt.SetImpersonate(true)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ })
+
+ t.Run("impersonate, but target user is still set", func(t *testing.T) {
+ var bt bearer.Token
+ bt.SetExp(10)
bt.SetImpersonate(true)
+ var reqSigner user.ID
+ user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey()))
+
+ bt.ForUser(reqSigner)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ })
+
+ t.Run("impersonate but invalid signer", func(t *testing.T) {
+ var bt bearer.Token
+ bt.SetExp(10)
+ bt.SetImpersonate(true)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
require.NoError(t, bt.Sign(privs[1].PrivateKey))
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -252,18 +347,18 @@ func TestMessageSign(t *testing.T) {
t.Run("put and get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("only get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[2].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("none", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[3].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
})
}
@@ -282,6 +377,25 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token
return b
}
+func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token {
+ var b bearer.Token
+ b.SetExp(currentEpoch + 1)
+ b.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ },
+ Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
+ })
+
+ return b
+}
+
+func testBearerTokenNoOverride() bearer.Token {
+ var b bearer.Token
+ b.SetExp(currentEpoch + 1)
+ return b
+}
+
func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
ruleGet := chain.Rule{
Status: chain.Allow,
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index ce1e72104..af355639f 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -2,7 +2,9 @@ package tree
import (
"context"
+ "crypto/ecdsa"
"crypto/sha256"
+ "crypto/tls"
"errors"
"fmt"
"io"
@@ -13,6 +15,8 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -20,12 +24,15 @@ import (
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -39,7 +46,7 @@ const defaultSyncWorkerCount = 20
// tree IDs from the other container nodes. Returns ErrNotInContainer if the node
// is not included in the container.
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
- nodes, pos, err := s.getContainerNodes(cid)
+ nodes, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -71,8 +78,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
var treesToSync []string
var outErr error
- err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool {
- resp, outErr = c.TreeList(ctx, req)
+ err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(fCtx, req)
if outErr != nil {
return false
}
@@ -92,7 +99,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
for _, tid := range treesToSync {
h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
+ s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
continue
@@ -100,7 +107,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
- s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
+ s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
}
@@ -112,7 +119,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
// SynchronizeTree tries to synchronize log starting from the last stored height.
func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error {
- nodes, pos, err := s.getContainerNodes(cid)
+ nodes, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -131,14 +138,9 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
}
// mergeOperationStreams performs merge sort for node operation streams to one stream.
-func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
+func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
defer close(merged)
- ms := make([]*pilorama.Move, len(streams))
- for i := range streams {
- ms[i] = <-streams[i]
- }
-
// Merging different node streams shuffles incoming operations like that:
//
// x - operation from the stream A
@@ -150,6 +152,15 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
// operation height from the stream B. This height is stored in minStreamedLastHeight.
var minStreamedLastHeight uint64 = math.MaxUint64
+ ms := make([]*pilorama.Move, len(streams))
+ for i := range streams {
+ select {
+ case ms[i] = <-streams[i]:
+ case <-ctx.Done():
+ return minStreamedLastHeight
+ }
+ }
+
for {
var minTimeMoveTime uint64 = math.MaxUint64
minTimeMoveIndex := -1
@@ -164,7 +175,11 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
break
}
- merged <- ms[minTimeMoveIndex]
+ select {
+ case merged <- ms[minTimeMoveIndex]:
+ case <-ctx.Done():
+ return minStreamedLastHeight
+ }
height := ms[minTimeMoveIndex].Time
if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
minStreamedLastHeight = min(minStreamedLastHeight, height)
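The hunk above makes mergeOperationStreams cancellable: the initial receive from every stream and each send to merged now select on ctx.Done(), so cancelling the errgroup context unblocks the merge goroutine instead of leaving it parked on a channel. A minimal, self-contained sketch of the guarded send/receive pattern (pump, work and out are illustrative names, not code from this repository):

package main

import "context"

// pump copies values from work to out until work is closed or ctx is
// cancelled; both channel operations select on ctx.Done(), so cancellation
// never leaves the goroutine stuck on an abandoned channel.
func pump(ctx context.Context, work <-chan int, out chan<- int) error {
	for {
		var (
			v  int
			ok bool
		)
		select {
		case v, ok = <-work:
			if !ok {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
		select {
		case out <- v:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	work := make(chan int, 1)
	out := make(chan int, 1)
	work <- 42
	close(work)
	_ = pump(context.Background(), work, out)
}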
@@ -176,38 +191,30 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string,
operationStream <-chan *pilorama.Move,
-) uint64 {
- errGroup, _ := errgroup.WithContext(ctx)
- const workersCount = 1024
- errGroup.SetLimit(workersCount)
-
- // We run TreeApply concurrently for the operation batch. Let's consider two operations
- // in the batch m1 and m2 such that m1.Time < m2.Time. The engine may apply m2 and fail
- // on m1. That means the service must start sync from m1.Time in the next iteration and
- // this height is stored in unappliedOperationHeight.
- var unappliedOperationHeight uint64 = math.MaxUint64
- var heightMtx sync.Mutex
-
+) (uint64, error) {
var prev *pilorama.Move
+ var batch []*pilorama.Move
for m := range operationStream {
// skip already applied op
if prev != nil && prev.Time == m.Time {
continue
}
prev = m
+ batch = append(batch, m)
- errGroup.Go(func() error {
- if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
- heightMtx.Lock()
- unappliedOperationHeight = min(unappliedOperationHeight, m.Time)
- heightMtx.Unlock()
- return err
+ if len(batch) == s.syncBatchSize {
+ if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
+ return batch[0].Time, err
}
- return nil
- })
+ batch = batch[:0]
+ }
}
- _ = errGroup.Wait()
- return unappliedOperationHeight
+ if len(batch) > 0 {
+ if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
+ return batch[0].Time, err
+ }
+ }
+ return math.MaxUint64, nil
}
func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
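The rewritten applyOperationStream trades the 1024-goroutine errgroup (and its unappliedOperationHeight mutex bookkeeping) for sequential batches: operations accumulate up to syncBatchSize and are flushed through TreeApplyBatch, and on failure the height of the first operation in the failed batch is returned so the next sync restarts from it. A sketch of that pattern, with apply standing in for forest.TreeApplyBatch (hypothetical signature):

package main

import (
	"context"
	"fmt"
	"math"
)

// applyBatched mirrors the batching above: accumulate heights, flush every
// batchSize via apply, and on failure return the first height of the failed
// batch so the caller can restart from it.
func applyBatched(ctx context.Context, in <-chan uint64, batchSize int,
	apply func(context.Context, []uint64) error,
) (uint64, error) {
	var batch []uint64
	for h := range in {
		batch = append(batch, h)
		if len(batch) == batchSize {
			if err := apply(ctx, batch); err != nil {
				return batch[0], err
			}
			batch = batch[:0]
		}
	}
	if len(batch) > 0 {
		if err := apply(ctx, batch); err != nil {
			return batch[0], err
		}
	}
	return math.MaxUint64, nil
}

func main() {
	in := make(chan uint64, 3)
	for _, h := range []uint64{1, 2, 3} {
		in <- h
	}
	close(in)
	from, err := applyBatched(context.Background(), in, 2,
		func(context.Context, []uint64) error { return nil })
	fmt.Println(from == math.MaxUint64, err) // true <nil>
}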
@@ -240,10 +247,14 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
Parent: lm.GetParentId(),
Child: lm.GetChildId(),
}
- if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
+ if err := m.FromBytes(lm.GetMeta()); err != nil {
return err
}
- opsCh <- m
+ select {
+ case opsCh <- m:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
}
if !errors.Is(err, io.EOF) {
return err
@@ -259,7 +270,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
treeID string, nodes []netmapSDK.NodeInfo,
) uint64 {
- s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
+ s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
errGroup, egCtx := errgroup.WithContext(ctx)
const workersCount = 1024
@@ -272,13 +283,14 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
merged := make(chan *pilorama.Move)
var minStreamedLastHeight uint64
errGroup.Go(func() error {
- minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged)
+ minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged)
return nil
})
var minUnappliedHeight uint64
errGroup.Go(func() error {
- minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged)
- return nil
+ var err error
+ minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged)
+ return err
})
var allNodesSynced atomic.Bool
@@ -287,27 +299,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
for i, n := range nodes {
errGroup.Go(func() error {
var nodeSynced bool
- n.IterateNetworkEndpoints(func(addr string) bool {
+ for addr := range n.NetworkEndpoints() {
var a network.Address
if err := a.FromString(addr); err != nil {
- s.log.Warn(logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- return false
+ s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ continue
}
- cc, err := s.createConnection(a)
+ cc, err := dialTreeService(ctx, a, s.key, s.ds)
if err != nil {
- s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- return false
+ s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ continue
}
- defer cc.Close()
err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
if err != nil {
- s.log.Warn(logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
+ s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
}
nodeSynced = err == nil
- return true
- })
+ _ = cc.Close()
+ break
+ }
close(nodeOperationStreams[i])
if !nodeSynced {
allNodesSynced.Store(false)
@@ -317,7 +329,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
}
if err := errGroup.Wait(); err != nil {
allNodesSynced.Store(false)
- s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
+ s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
}
newHeight := minStreamedLastHeight
@@ -332,19 +344,60 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return from
}
-func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
- return grpc.NewClient(a.URIAddr(),
+func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) {
+ cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer()))
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ defer cancel()
+
+ req := &HealthcheckRequest{
+ Body: &HealthcheckRequest_Body{},
+ }
+ if err := SignMessage(req, key); err != nil {
+ return nil, err
+ }
+
+ // perform a lightweight request to verify that the connection is usable
+ if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
+ _ = cc.Close()
+ return nil, err
+ }
+ return cc, nil
+}
+
+func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ host, isTLS, err := client.ParseURI(a.URIAddr())
+ if err != nil {
+ return nil, err
+ }
+
+ creds := insecure.NewCredentials()
+ if isTLS {
+ creds = credentials.NewTLS(&tls.Config{})
+ }
+
+ defaultOpts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
+ qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
- tracing_grpc.NewUnaryClientInteceptor(),
+ tracing_grpc.NewUnaryClientInterceptor(),
+ tagging.NewUnaryClientInterceptor(),
),
grpc.WithChainStreamInterceptor(
+ qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
metrics.NewStreamClientInterceptor(),
tracing_grpc.NewStreamClientInterceptor(),
+ tagging.NewStreamClientInterceptor(),
),
- grpc.WithTransportCredentials(insecure.NewCredentials()),
+ grpc.WithTransportCredentials(creds),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- )
+ grpc.WithDisableServiceConfig(),
+ }
+
+ return grpc.NewClient(host, append(defaultOpts, opts...)...)
}
// ErrAlreadySyncing is returned when a service synchronization has already
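dialTreeService above replaces the old bare createConnection: transport credentials are chosen from the parsed URI (TLS when the scheme demands it, insecure otherwise), and a signed Healthcheck RPC is issued under defaultClientConnectTimeout before the connection is handed to the sync loop, so dead endpoints fail fast instead of surfacing mid-stream. A sketch of the dial-then-probe shape; probe and the 10-second timeout are stand-ins, and the real code signs a HealthcheckRequest with the node key:

package sketch

import (
	"context"
	"crypto/tls"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// dialChecked dials host, then issues one cheap RPC before returning the
// connection, closing it if the probe fails.
func dialChecked(ctx context.Context, host string, isTLS bool,
	probe func(context.Context, *grpc.ClientConn) error,
) (*grpc.ClientConn, error) {
	creds := insecure.NewCredentials()
	if isTLS {
		creds = credentials.NewTLS(&tls.Config{})
	}
	cc, err := grpc.NewClient(host, grpc.WithTransportCredentials(creds))
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	if err := probe(ctx, cc); err != nil {
		_ = cc.Close()
		return nil, err
	}
	return cc, nil
}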
@@ -384,25 +437,25 @@ func (s *Service) syncLoop(ctx context.Context) {
return
case <-s.syncChan:
ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
- s.log.Debug(logs.TreeSyncingTrees)
+ s.log.Info(ctx, logs.TreeSyncingTrees)
start := time.Now()
- cnrs, err := s.cfg.cnrSource.List()
+ cnrs, err := s.cnrSource.List(ctx)
if err != nil {
- s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
+ s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
s.metrics.AddSyncDuration(time.Since(start), false)
span.End()
break
}
- newMap, cnrsToSync := s.containersToSync(cnrs)
+ newMap, cnrsToSync := s.containersToSync(ctx, cnrs)
s.syncContainers(ctx, cnrsToSync)
s.removeContainers(ctx, newMap)
- s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
+ s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)
s.metrics.AddSyncDuration(time.Since(start), true)
span.End()
@@ -422,19 +475,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
err := s.syncPool.Submit(func() {
defer wg.Done()
- s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
err := s.synchronizeAllTrees(ctx, cnr)
if err != nil {
- s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
+ s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
return
}
- s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
+ s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
})
if err != nil {
wg.Done()
- s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
+ s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
zap.Stringer("cid", cnr),
zap.Error(err))
if errors.Is(err, ants.ErrPoolClosed) {
@@ -458,9 +511,9 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
continue
}
- existed, err := containerCore.WasRemoved(s.cnrSource, cnr)
+ existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr)
if err != nil {
- s.log.Error(logs.TreeCouldNotCheckIfContainerExisted,
+ s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
zap.Stringer("cid", cnr),
zap.Error(err))
} else if existed {
@@ -472,25 +525,25 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
}
for _, cnr := range removed {
- s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
err := s.DropTree(ctx, cnr, "")
if err != nil {
- s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
+ s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
zap.Stringer("cid", cnr),
zap.Error(err))
}
}
}
-func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
+func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
cnrsToSync := make([]cid.ID, 0, len(cnrs))
for _, cnr := range cnrs {
- _, pos, err := s.getContainerNodes(cnr)
+ _, pos, err := s.getContainerNodes(ctx, cnr)
if err != nil {
- s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
+ s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes,
zap.Stringer("cid", cnr),
zap.Error(err))
continue
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 497d90554..87d419408 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -1,6 +1,7 @@
package tree
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -64,7 +65,7 @@ func Test_mergeOperationStreams(t *testing.T) {
merged := make(chan *pilorama.Move, 1)
min := make(chan uint64)
go func() {
- min <- mergeOperationStreams(nodeOpChans, merged)
+ min <- mergeOperationStreams(context.Background(), nodeOpChans, merged)
}()
var res []uint64
diff --git a/internal/ape/converter.go b/pkg/util/ape/converter.go
similarity index 100%
rename from internal/ape/converter.go
rename to pkg/util/ape/converter.go
diff --git a/internal/ape/converter_test.go b/pkg/util/ape/converter_test.go
similarity index 100%
rename from internal/ape/converter_test.go
rename to pkg/util/ape/converter_test.go
diff --git a/cmd/frostfs-cli/modules/util/ape.go b/pkg/util/ape/parser.go
similarity index 87%
rename from cmd/frostfs-cli/modules/util/ape.go
rename to pkg/util/ape/parser.go
index 73c368510..6f114d45b 100644
--- a/cmd/frostfs-cli/modules/util/ape.go
+++ b/pkg/util/ape/parser.go
@@ -1,16 +1,14 @@
-package util
+package ape
import (
"errors"
"fmt"
"os"
- "strconv"
"strings"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/flynn-archive/go-shlex"
- "github.com/spf13/cobra"
)
var (
@@ -27,38 +25,6 @@ var (
errFailedToParseAllAny = errors.New("any/all is not parsed")
)
-// PrintHumanReadableAPEChain print APE chain rules.
-func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) {
- cmd.Println("Chain ID: " + string(chain.ID))
- cmd.Printf(" HEX: %x\n", chain.ID)
- cmd.Println("Rules:")
- for _, rule := range chain.Rules {
- cmd.Println("\n\tStatus: " + rule.Status.String())
- cmd.Println("\tAny: " + strconv.FormatBool(rule.Any))
- cmd.Println("\tConditions:")
- for _, c := range rule.Condition {
- var ot string
- switch c.Kind {
- case apechain.KindResource:
- ot = "Resource"
- case apechain.KindRequest:
- ot = "Request"
- default:
- panic("unknown object type")
- }
- cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value))
- }
- cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted))
- for _, name := range rule.Actions.Names {
- cmd.Println("\t\t" + name)
- }
- cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted))
- for _, name := range rule.Resources.Names {
- cmd.Println("\t\t" + name)
- }
- }
-}
-
func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error {
data, err := os.ReadFile(path)
if err != nil {
@@ -208,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) {
case "deny":
if !found {
return apechain.AccessDenied, nil
- } else if strings.EqualFold(expression, "QuotaLimitReached") {
- return apechain.QuotaLimitReached, nil
- } else {
- return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
}
+ if strings.EqualFold(expression, "QuotaLimitReached") {
+ return apechain.QuotaLimitReached, nil
+ }
+ return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
case "allow":
if found {
return 0, errUnknownStatusDetail
@@ -295,7 +261,7 @@ func parseResource(lexeme string, isObj bool) (string, error) {
} else {
if lexeme == "*" {
return nativeschema.ResourceFormatAllContainers, nil
- } else if lexeme == "/*" {
+ } else if lexeme == "/*" || lexeme == "root/*" {
return nativeschema.ResourceFormatRootContainers, nil
} else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 {
lexeme = lexeme[1:]
diff --git a/cmd/frostfs-cli/modules/util/ape_test.go b/pkg/util/ape/parser_test.go
similarity index 96%
rename from cmd/frostfs-cli/modules/util/ape_test.go
rename to pkg/util/ape/parser_test.go
index b275803df..c236c4603 100644
--- a/cmd/frostfs-cli/modules/util/ape_test.go
+++ b/pkg/util/ape/parser_test.go
@@ -1,4 +1,4 @@
-package util
+package ape
import (
"fmt"
@@ -43,6 +43,15 @@ func TestParseAPERule(t *testing.T) {
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
},
},
+ {
+ name: "Valid rule for all containers in explicit root namespace",
+ rule: "allow Container.Put root/*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}},
+ },
+ },
{
name: "Valid rule for all objects in root namespace and container",
rule: "allow Object.Put /cid/*",
diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go
index 547c8d50b..66581878a 100644
--- a/pkg/util/attributes/parser_test.go
+++ b/pkg/util/attributes/parser_test.go
@@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) {
mExp = mSrc
}
- node.IterateAttributes(func(key, value string) {
+ for key, value := range node.Attributes() {
v, ok := mExp[key]
require.True(t, ok)
require.Equal(t, value, v)
delete(mExp, key)
- })
+ }
require.Empty(t, mExp)
}
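The test now ranges over node.Attributes(), the Go 1.23 iterator form of the removed IterateAttributes callback; break and continue work as in any ordinary loop. A self-contained sketch of such an iterator, with a map standing in for the node's attribute storage:

package main

import (
	"fmt"
	"iter"
)

// attributes exposes the map as an iter.Seq2 so callers can use for-range.
func attributes(m map[string]string) iter.Seq2[string, string] {
	return func(yield func(string, string) bool) {
		for k, v := range m {
			if !yield(k, v) {
				return
			}
		}
	}
}

func main() {
	for k, v := range attributes(map[string]string{"price": "100"}) {
		fmt.Println(k, v)
	}
}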
diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go
index a9877e007..8569ec734 100644
--- a/pkg/util/http/calls.go
+++ b/pkg/util/http/calls.go
@@ -32,8 +32,8 @@ func (x *Server) Serve() error {
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to Serve method will have no effect.
-func (x *Server) Shutdown() error {
- ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout)
+func (x *Server) Shutdown(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout)
err := x.srv.Shutdown(ctx)
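Shutdown now derives its deadline from the caller's context through context.WithoutCancel (Go 1.21+): request-scoped values such as trace IDs survive, but the caller's cancellation can no longer abort the drain, which stays bounded only by the shutdown timeout. A minimal sketch of the pattern:

package sketch

import (
	"context"
	"net/http"
	"time"
)

// shutdown drains srv gracefully even if ctx is already cancelled;
// only the explicit timeout bounds the drain.
func shutdown(ctx context.Context, srv *http.Server, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), timeout)
	defer cancel()
	return srv.Shutdown(ctx)
}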
diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go
index 923412a7f..2589ab786 100644
--- a/pkg/util/http/server.go
+++ b/pkg/util/http/server.go
@@ -76,8 +76,7 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server {
o(c)
}
- switch {
- case c.shutdownTimeout <= 0:
+ if c.shutdownTimeout <= 0 {
panicOnOptValue("shutdown timeout", c.shutdownTimeout)
}
diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go
index b2942b52a..6337039a9 100644
--- a/pkg/util/keyer/dashboard.go
+++ b/pkg/util/keyer/dashboard.go
@@ -6,6 +6,7 @@ import (
"os"
"text/tabwriter"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -104,9 +105,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) {
func base58ToHex(data string) string {
val, err := base58.Decode(data)
- if err != nil {
- panic("produced incorrect base58 value")
- }
+ assert.NoError(err, "produced incorrect base58 value")
return hex.EncodeToString(val)
}
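base58ToHex now routes its must-not-happen path through internal/assert instead of an inline panic. The helper's implementation is not part of this diff; a plausible sketch of its shape, stated purely as an assumption:

package assert

import "fmt"

// NoError panics with the supplied message when err is non-nil,
// centralizing invariant checks (assumed shape of internal/assert.NoError).
func NoError(err error, msg string) {
	if err != nil {
		panic(fmt.Sprintf("%s: %v", msg, err))
	}
}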
diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go
new file mode 100644
index 000000000..413b1d9aa
--- /dev/null
+++ b/pkg/util/logger/log.go
@@ -0,0 +1,35 @@
+package logger
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "go.uber.org/zap"
+)
+
+func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Debug(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Info(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Warn(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Error(msg, appendContext(ctx, fields...)...)
+}
+
+func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
+ if traceID := tracing.GetTraceID(ctx); traceID != "" {
+ fields = append(fields, zap.String("trace_id", traceID))
+ }
+ if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined {
+ fields = append(fields, zap.String("io_tag", ioTag))
+ }
+ return fields
+}
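Every record now passes through appendContext, which lifts the trace ID and QoS IO tag out of the context into structured fields, so call sites pass only the message and their own fields. A self-contained analog; ctxKey and the value plumbing are illustrative, while the real code uses tracing.GetTraceID and qos.IOTagFromContext:

package main

import (
	"context"

	"go.uber.org/zap"
)

type ctxKey string

// appendCtx mirrors appendContext: request-scoped values found in ctx
// become structured fields on every log line.
func appendCtx(ctx context.Context, fields ...zap.Field) []zap.Field {
	if id, ok := ctx.Value(ctxKey("trace_id")).(string); ok {
		fields = append(fields, zap.String("trace_id", id))
	}
	return fields
}

func main() {
	z := zap.NewExample()
	ctx := context.WithValue(context.Background(), ctxKey("trace_id"), "abc123")
	z.Info("handling request", appendCtx(ctx)...)
	// Output: {"level":"info","msg":"handling request","trace_id":"abc123"}
}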
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index 4b60f02de..a1998cb1a 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -2,6 +2,7 @@ package logger
import (
"fmt"
+ "time"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/ssgreg/journald"
@@ -12,8 +13,10 @@ import (
// Logger represents a component
// for writing messages to log.
type Logger struct {
- *zap.Logger
- lvl zap.AtomicLevel
+ z *zap.Logger
+ c zapcore.Core
+ t Tag
+ w bool
}
// Prm groups Logger's parameters.
@@ -22,16 +25,8 @@ type Logger struct {
// Parameters that have been connected to the Logger support its
// configuration changing.
//
-// Passing Prm after a successful connection via the NewLogger, connects
-// the Prm to a new instance of the Logger.
-//
-// See also Reload, SetLevelString.
+// See also Logger.Reload, SetLevelString.
type Prm struct {
- // link to the created Logger
- // instance; used for a runtime
- // reconfiguration
- _log *Logger
-
// support runtime rereading
level zapcore.Level
@@ -43,6 +38,12 @@ type Prm struct {
// PrependTimestamp specifies whether to prepend a timestamp in the log
PrependTimestamp bool
+
+ // Options for zap.Logger
+ Options []zap.Option
+
+ // map from tag to log level; overrides level for the listed tags
+ tl map[Tag]zapcore.Level
}
const (
@@ -72,20 +73,10 @@ func (p *Prm) SetDestination(d string) error {
return nil
}
-// Reload reloads configuration of a connected instance of the Logger.
-// Returns ErrLoggerNotConnected if no connection has been performed.
-// Returns any reconfiguration error from the Logger directly.
-func (p Prm) Reload() error {
- if p._log == nil {
- // incorrect logger usage
- panic("parameters are not connected to any Logger")
- }
-
- return p._log.reload(p)
-}
-
-func defaultPrm() *Prm {
- return new(Prm)
+// SetTags parses a list of (tags, level) pairs and stores the per-tag log levels.
+func (p *Prm) SetTags(tags [][]string) (err error) {
+ p.tl, err = parseTags(tags)
+ return err
}
// NewLogger constructs a new zap logger instance. Constructing with nil
@@ -99,10 +90,7 @@ func defaultPrm() *Prm {
// - ISO8601 time encoding.
//
// Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm *Prm) (*Logger, error) {
- if prm == nil {
- prm = defaultPrm()
- }
+func NewLogger(prm Prm) (*Logger, error) {
switch prm.dest {
case DestinationUndefined, DestinationStdout:
return newConsoleLogger(prm)
@@ -113,11 +101,9 @@ func NewLogger(prm *Prm) (*Logger, error) {
}
}
-func newConsoleLogger(prm *Prm) (*Logger, error) {
- lvl := zap.NewAtomicLevelAt(prm.level)
-
+func newConsoleLogger(prm Prm) (*Logger, error) {
c := zap.NewProductionConfig()
- c.Level = lvl
+ c.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
c.Encoding = "console"
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
@@ -129,25 +115,23 @@ func newConsoleLogger(prm *Prm) (*Logger, error) {
c.EncoderConfig.TimeKey = ""
}
- lZap, err := c.Build(
+ opts := []zap.Option{
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- )
+ zap.AddCallerSkip(1),
+ }
+ opts = append(opts, prm.Options...)
+ lZap, err := c.Build(opts...)
if err != nil {
return nil, err
}
-
- l := &Logger{Logger: lZap, lvl: lvl}
- prm._log = l
+ l := &Logger{z: lZap, c: lZap.Core()}
+ l = l.WithTag(TagMain)
return l, nil
}
-func newJournaldLogger(prm *Prm) (*Logger, error) {
- lvl := zap.NewAtomicLevelAt(prm.level)
-
+func newJournaldLogger(prm Prm) (*Logger, error) {
c := zap.NewProductionConfig()
- c.Level = lvl
- c.Encoding = "console"
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
@@ -160,22 +144,100 @@ func newJournaldLogger(prm *Prm) (*Logger, error) {
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
- core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
+ core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
- lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
-
- l := &Logger{Logger: lZap, lvl: lvl}
- prm._log = l
+ var samplerOpts []zapcore.SamplerOption
+ if c.Sampling.Hook != nil {
+ samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook))
+ }
+ samplingCore := zapcore.NewSamplerWithOptions(
+ coreWithContext,
+ time.Second,
+ c.Sampling.Initial,
+ c.Sampling.Thereafter,
+ samplerOpts...,
+ )
+ opts := []zap.Option{
+ zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
+ zap.AddCallerSkip(1),
+ }
+ opts = append(opts, prm.Options...)
+ lZap := zap.New(samplingCore, opts...)
+ l := &Logger{z: lZap, c: lZap.Core()}
+ l = l.WithTag(TagMain)
return l, nil
}
-func (l *Logger) reload(prm Prm) error {
- l.lvl.SetLevel(prm.level)
- return nil
+// With creates a child logger with additional fields; the parent logger is not affected.
+// Panics if the tag is unset.
+func (l *Logger) With(fields ...zap.Field) *Logger {
+ if l.t == 0 {
+ panic("tag is unset")
+ }
+ c := *l
+ c.z = l.z.With(fields...)
+ // mark that With has been called on this logger
+ c.w = true
+ return &c
+}
+
+type core struct {
+ c zapcore.Core
+ l zap.AtomicLevel
+}
+
+func (c *core) Enabled(lvl zapcore.Level) bool {
+ return c.l.Enabled(lvl)
+}
+
+func (c *core) With(fields []zapcore.Field) zapcore.Core {
+ clone := *c
+ clone.c = clone.c.With(fields)
+ return &clone
+}
+
+func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ return c.c.Check(e, ce)
+}
+
+func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error {
+ return c.c.Write(e, fields)
+}
+
+func (c *core) Sync() error {
+ return c.c.Sync()
+}
+
+// WithTag returns a copy of the logger bound to the given tag; the tag's log level controls its output.
+// Panics if the tag is unsupported or if With has already been called on this logger.
+func (l *Logger) WithTag(tag Tag) *Logger {
+ if tag == 0 || tag > Tag(len(_Tag_index)-1) {
+ panic("unsupported tag " + tag.String())
+ }
+ if l.w {
+ panic("unsupported operation for the logger's state")
+ }
+ c := *l
+ c.t = tag
+ c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
+ return &core{
+ c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
+ l: tagToLogLevel[tag],
+ }
+ }))
+ return &c
+}
+
+func NewLoggerWrapper(z *zap.Logger) *Logger {
+ return &Logger{
+ z: z.WithOptions(zap.AddCallerSkip(1)),
+ t: TagMain,
+ c: z.Core(),
+ }
}
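The reworked Logger drops the embedded *zap.Logger and the Prm._log reload plumbing in favor of tag-scoped copies: WithTag re-wraps the core with the tag's atomic level, and With then binds fields (marking the logger so a later WithTag panics rather than silently dropping them). A usage sketch against the API introduced above:

package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

func main() {
	var prm logger.Prm
	if err := prm.SetLevelString("info"); err != nil {
		panic(err)
	}
	l, err := logger.NewLogger(prm)
	if err != nil {
		panic(err)
	}
	logger.UpdateLevelForTags(prm)

	// Order matters: WithTag first, then With for bound fields;
	// reversing the two panics by design.
	shardLog := l.WithTag(logger.TagShard).With(zap.String("shard", "s1"))
	shardLog.Info(context.Background(), "started")
}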
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
new file mode 100644
index 000000000..b867ee6cc
--- /dev/null
+++ b/pkg/util/logger/logger_test.go
@@ -0,0 +1,118 @@
+package logger
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest/observer"
+)
+
+func BenchmarkLogger(b *testing.B) {
+ ctx := context.Background()
+ m := map[string]Prm{}
+
+ prm := Prm{}
+ require.NoError(b, prm.SetLevelString("debug"))
+ m["logging enabled"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("error"))
+ m["logging disabled"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("error"))
+ require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
+ m["logging enabled via tags"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("debug"))
+ require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
+ m["logging disabled via tags"] = prm
+
+ for k, v := range m {
+ b.Run(k, func(b *testing.B) {
+ logger, err := createLogger(v)
+ require.NoError(b, err)
+ UpdateLevelForTags(v)
+ b.ResetTimer()
+ b.ReportAllocs()
+ for range b.N {
+ logger.Info(ctx, "test info")
+ }
+ })
+ }
+}
+
+type testCore struct {
+ core zapcore.Core
+}
+
+func (c *testCore) Enabled(lvl zapcore.Level) bool {
+ return c.core.Enabled(lvl)
+}
+
+func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
+ c.core = c.core.With(fields)
+ return c
+}
+
+func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ return ce.AddCore(e, c)
+}
+
+func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
+ return nil
+}
+
+func (c *testCore) Sync() error {
+ return c.core.Sync()
+}
+
+func createLogger(prm Prm) (*Logger, error) {
+ prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+ tc := testCore{core: core}
+ return &tc
+ })}
+ return NewLogger(prm)
+}
+
+func TestLoggerOutput(t *testing.T) {
+ obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
+
+ prm := Prm{}
+ require.NoError(t, prm.SetLevelString("debug"))
+ prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core {
+ return obs
+ })}
+ loggerMain, err := NewLogger(prm)
+ require.NoError(t, err)
+ UpdateLevelForTags(prm)
+
+ loggerMainWith := loggerMain.With(zap.String("key", "value"))
+
+ require.Panics(t, func() {
+ loggerMainWith.WithTag(TagShard)
+ })
+ loggerShard := loggerMain.WithTag(TagShard)
+ loggerShard = loggerShard.With(zap.String("key1", "value1"))
+
+ loggerMorph := loggerMain.WithTag(TagMorph)
+ loggerMorph = loggerMorph.With(zap.String("key2", "value2"))
+
+ ctx := context.Background()
+ loggerMain.Debug(ctx, "main")
+ loggerMainWith.Debug(ctx, "main with")
+ loggerShard.Debug(ctx, "shard")
+ loggerMorph.Debug(ctx, "morph")
+
+ require.Len(t, logs.All(), 4)
+ require.Len(t, logs.FilterFieldKey("key").All(), 1)
+ require.Len(t, logs.FilterFieldKey("key1").All(), 1)
+ require.Len(t, logs.FilterFieldKey("key2").All(), 1)
+ require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2)
+ require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1)
+ require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1)
+}
diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result
new file mode 100644
index 000000000..612fa2967
--- /dev/null
+++ b/pkg/util/logger/logger_test.result
@@ -0,0 +1,46 @@
+goos: linux
+goarch: amd64
+pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger
+cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
+BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op
+PASS
+ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s
diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go
new file mode 100644
index 000000000..1b98f2e62
--- /dev/null
+++ b/pkg/util/logger/tag_string.go
@@ -0,0 +1,43 @@
+// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT.
+
+package logger
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[TagMain-1]
+ _ = x[TagMorph-2]
+ _ = x[TagGrpcSvc-3]
+ _ = x[TagIr-4]
+ _ = x[TagProcessor-5]
+ _ = x[TagEngine-6]
+ _ = x[TagBlobovnicza-7]
+ _ = x[TagBlobovniczaTree-8]
+ _ = x[TagBlobstor-9]
+ _ = x[TagFSTree-10]
+ _ = x[TagGC-11]
+ _ = x[TagShard-12]
+ _ = x[TagWriteCache-13]
+ _ = x[TagDeleteSvc-14]
+ _ = x[TagGetSvc-15]
+ _ = x[TagSearchSvc-16]
+ _ = x[TagSessionSvc-17]
+ _ = x[TagTreeSvc-18]
+ _ = x[TagPolicer-19]
+ _ = x[TagReplicator-20]
+}
+
+const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator"
+
+var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148}
+
+func (i Tag) String() string {
+ i -= 1
+ if i >= Tag(len(_Tag_index)-1) {
+ return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _Tag_name[_Tag_index[i]:_Tag_index[i+1]]
+}
diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go
new file mode 100644
index 000000000..a5386707e
--- /dev/null
+++ b/pkg/util/logger/tags.go
@@ -0,0 +1,94 @@
+package logger
+
+import (
+ "fmt"
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+//go:generate stringer -type Tag -linecomment
+
+type Tag uint8
+
+const (
+ _ Tag = iota //
+ TagMain // main
+ TagMorph // morph
+ TagGrpcSvc // grpcsvc
+ TagIr // ir
+ TagProcessor // processor
+ TagEngine // engine
+ TagBlobovnicza // blobovnicza
+ TagBlobovniczaTree // blobovniczatree
+ TagBlobstor // blobstor
+ TagFSTree // fstree
+ TagGC // gc
+ TagShard // shard
+ TagWriteCache // writecache
+ TagDeleteSvc // deletesvc
+ TagGetSvc // getsvc
+ TagSearchSvc // searchsvc
+ TagSessionSvc // sessionsvc
+ TagTreeSvc // treesvc
+ TagPolicer // policer
+ TagReplicator // replicator
+
+ defaultLevel = zapcore.InfoLevel
+)
+
+var (
+ tagToLogLevel = map[Tag]zap.AtomicLevel{}
+ stringToTag = map[string]Tag{}
+)
+
+func init() {
+ for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
+ tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
+ stringToTag[i.String()] = i
+ }
+}
+
+// parseTags returns:
+// - a map (always instantiated on success) from tag to its custom log level;
+// - an error if parsing failed, in which case the map is nil.
+func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
+ m := make(map[Tag]zapcore.Level)
+ if len(raw) == 0 {
+ return m, nil
+ }
+ for _, item := range raw {
+ str, level := item[0], item[1]
+ if len(level) == 0 {
+ // Tags without an explicit level are skipped:
+ // the default log level will be used for them.
+ continue
+ }
+ var l zapcore.Level
+ err := l.UnmarshalText([]byte(level))
+ if err != nil {
+ return nil, err
+ }
+ tmp := strings.Split(str, ",")
+ for _, tagStr := range tmp {
+ tag, ok := stringToTag[strings.TrimSpace(tagStr)]
+ if !ok {
+ return nil, fmt.Errorf("unsupported tag %s", tagStr)
+ }
+ m[tag] = l
+ }
+ }
+ return m, nil
+}
+
+func UpdateLevelForTags(prm Prm) {
+ for k, v := range tagToLogLevel {
+ nk, ok := prm.tl[k]
+ if ok {
+ v.SetLevel(nk)
+ } else {
+ v.SetLevel(prm.level)
+ }
+ }
+}
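SetTags feeds parseTags, and UpdateLevelForTags pushes the parsed levels into the shared atomic levels, falling back to the common level for every unlisted tag; one entry may name several comma-separated tags. A usage sketch:

package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

func main() {
	var prm logger.Prm
	if err := prm.SetLevelString("error"); err != nil {
		panic(err)
	}
	// morph and treesvc log at debug; every other tag stays at error.
	if err := prm.SetTags([][]string{{"morph,treesvc", "debug"}}); err != nil {
		panic(err)
	}
	l, err := logger.NewLogger(prm)
	if err != nil {
		panic(err)
	}
	logger.UpdateLevelForTags(prm)

	ctx := context.Background()
	l.WithTag(logger.TagMorph).Debug(ctx, "visible")
	l.WithTag(logger.TagShard).Debug(ctx, "suppressed")
}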
diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go
index f93756d17..b5b0a31eb 100644
--- a/pkg/util/logger/test/logger.go
+++ b/pkg/util/logger/test/logger.go
@@ -11,9 +11,10 @@ import (
// NewLogger creates a new logger.
func NewLogger(t testing.TB) *logger.Logger {
- var l logger.Logger
- l.Logger = zaptest.NewLogger(t,
- zaptest.Level(zapcore.DebugLevel),
- zaptest.WrapOptions(zap.Development(), zap.AddCaller()))
- return &l
+ return logger.NewLoggerWrapper(
+ zaptest.NewLogger(t,
+ zaptest.Level(zapcore.DebugLevel),
+ zaptest.WrapOptions(zap.Development(), zap.AddCaller()),
+ ),
+ )
}
diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go
new file mode 100644
index 000000000..7373e538f
--- /dev/null
+++ b/pkg/util/testing/netmap_source.go
@@ -0,0 +1,36 @@
+package testing
+
+import (
+ "context"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+var (
+ errInvalidDiff = errors.New("invalid diff")
+ errNetmapNotFound = errors.New("netmap not found")
+)
+
+type TestNetmapSource struct {
+ Netmaps map[uint64]*netmap.NetMap
+ CurrentEpoch uint64
+}
+
+func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+ if diff >= s.CurrentEpoch {
+ return nil, errInvalidDiff
+ }
+ return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
+}
+
+func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
+ if nm, found := s.Netmaps[epoch]; found {
+ return nm, nil
+ }
+ return nil, errNetmapNotFound
+}
+
+func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
+ return s.CurrentEpoch, nil
+}
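TestNetmapSource resolves GetNetMap(diff) to GetNetMapByEpoch(CurrentEpoch - diff), rejecting diffs that reach past epoch zero. A usage sketch:

package main

import (
	"context"
	"fmt"

	netmaptest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

func main() {
	src := &netmaptest.TestNetmapSource{
		Netmaps:      map[uint64]*netmap.NetMap{5: {}},
		CurrentEpoch: 5,
	}
	nm, err := src.GetNetMap(context.Background(), 0) // epoch 5 - diff 0
	fmt.Println(nm != nil, err)                       // true <nil>
}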
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
index f2f8881cf..39a420358 100644
--- a/scripts/populate-metabase/internal/generate.go
+++ b/scripts/populate-metabase/internal/generate.go
@@ -1,8 +1,10 @@
package internal
import (
+ cryptorand "crypto/rand"
"crypto/sha256"
"fmt"
+ "math/rand"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -14,14 +16,13 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
- "golang.org/x/exp/rand"
)
func GeneratePayloadPool(count uint, size uint) [][]byte {
var pool [][]byte
for range count {
payload := make([]byte, size)
- _, _ = rand.Read(payload)
+ _, _ = cryptorand.Read(payload)
pool = append(pool, payload)
}
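Payload bytes now come from crypto/rand instead of golang.org/x/exp/rand: no seeding, no global state, and the bytes are unpredictable; math/rand remains in the script only for cheap non-security choices such as picking ID digits. A minimal contrast:

package main

import (
	cryptorand "crypto/rand"
	"fmt"
	"math/rand"
)

func main() {
	payload := make([]byte, 16)
	_, _ = cryptorand.Read(payload) // unpredictable bytes, no seeding needed

	digit := rand.Intn(10) // fine for test-data shaping, not for secrets
	fmt.Println(payload[0], digit)
}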
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
index 4da23a295..fafe61eaa 100644
--- a/scripts/populate-metabase/internal/populate.go
+++ b/scripts/populate-metabase/internal/populate.go
@@ -31,13 +31,10 @@ func PopulateWithObjects(
for range count {
obj := factory()
-
- id := []byte(fmt.Sprintf(
- "%c/%c/%c",
+ id := fmt.Appendf(nil, "%c/%c/%c",
digits[rand.Int()%len(digits)],
digits[rand.Int()%len(digits)],
- digits[rand.Int()%len(digits)],
- ))
+ digits[rand.Int()%len(digits)])
prm := meta.PutPrm{}
prm.SetObject(obj)
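fmt.Appendf (Go 1.19+) formats straight into a byte slice, so the id above no longer takes the []byte(fmt.Sprintf(...)) detour and its extra allocation:

package main

import "fmt"

func main() {
	id := fmt.Appendf(nil, "%c/%c/%c", '0', '4', '7')
	fmt.Println(string(id)) // 0/4/7
}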
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
index 6f6b233cf..8c4ea41ad 100644
--- a/scripts/populate-metabase/main.go
+++ b/scripts/populate-metabase/main.go
@@ -91,15 +91,15 @@ func populate() (err error) {
return fmt.Errorf("couldn't open the metabase: %w", err)
}
defer func() {
- if errOnClose := db.Close(); errOnClose != nil {
+ if errOnClose := db.Close(ctx); errOnClose != nil {
err = errors.Join(
err,
- fmt.Errorf("couldn't close the metabase: %w", db.Close()),
+ fmt.Errorf("couldn't close the metabase: %w", db.Close(ctx)),
)
}
}()
- if err = db.Init(); err != nil {
+ if err = db.Init(ctx); err != nil {
return fmt.Errorf("couldn't init the metabase: %w", err)
}
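With errOnClose reused above, the deferred block closes the metabase exactly once and joins that error with the function's own, so neither is lost. The shape of the pattern, self-contained:

package main

import (
	"context"
	"errors"
	"fmt"
)

type db struct{}

func (db) Close(context.Context) error { return errors.New("close failed") }

// run joins the deferred Close error with the function's own error,
// calling Close exactly once.
func run(ctx context.Context) (err error) {
	var d db
	defer func() {
		if errOnClose := d.Close(ctx); errOnClose != nil {
			err = errors.Join(err, fmt.Errorf("couldn't close the metabase: %w", errOnClose))
		}
	}()
	return nil
}

func main() {
	fmt.Println(run(context.Background()))
}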