diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
new file mode 100644
index 000000000..4234de160
--- /dev/null
+++ b/.ci/Jenkinsfile
@@ -0,0 +1,81 @@
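+// Go versions to run CI against; the last entry doubles as the default toolchain image.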
+def golang = ['1.23', '1.24']
+def golangDefault = "golang:${golang.last()}"
+
+async {
+
+ for (version in golang) {
+ def go = version
+
+ task("test/go${go}") {
+ container("golang:${go}") {
+ sh 'make test'
+ }
+ }
+
+ task("build/go${go}") {
+ container("golang:${go}") {
+ for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
+ sh """
+ make bin/frostfs-${app}
+ bin/frostfs-${app} --version
+ """
+ }
+ }
+ }
+ }
+
+ task('test/race') {
+ container(golangDefault) {
+ sh 'make test GOFLAGS="-count=1 -race"'
+ }
+ }
+
+ task('lint') {
+ container(golangDefault) {
+ sh 'make lint-install lint'
+ }
+ }
+
+ task('staticcheck') {
+ container(golangDefault) {
+ sh 'make staticcheck-install staticcheck-run'
+ }
+ }
+
+ task('gopls') {
+ container(golangDefault) {
+ sh 'make gopls-install gopls-run'
+ }
+ }
+
+ task('gofumpt') {
+ container(golangDefault) {
+ sh '''
+ make fumpt-install
+ make fumpt
+ git diff --exit-code --quiet
+ '''
+ }
+ }
+
+ task('vulncheck') {
+ container(golangDefault) {
+ sh '''
+ go install golang.org/x/vuln/cmd/govulncheck@latest
+ govulncheck ./...
+ '''
+ }
+ }
+
+ task('pre-commit') {
+ dockerfile("""
+ FROM ${golangDefault}
+ RUN apt update && \
+ apt install -y --no-install-recommends pre-commit
+ """) {
+ withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
+ sh 'pre-commit run --color=always --hook-stage=manual --all-files'
+ }
+ }
+ }
+}
diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm
index 5d67a1d04..42aeebc48 100644
--- a/.docker/Dockerfile.adm
+++ b/.docker/Dockerfile.adm
@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ci b/.docker/Dockerfile.ci
index e9077c831..9ddd8de59 100644
--- a/.docker/Dockerfile.ci
+++ b/.docker/Dockerfile.ci
@@ -1,4 +1,4 @@
-FROM golang:1.22
+FROM golang:1.23
WORKDIR /tmp
diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli
index 16f643b61..16f130056 100644
--- a/.docker/Dockerfile.cli
+++ b/.docker/Dockerfile.cli
@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir
index f2cb764e5..c119f8127 100644
--- a/.docker/Dockerfile.ir
+++ b/.docker/Dockerfile.ir
@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage
index cf7f97748..854f7adea 100644
--- a/.docker/Dockerfile.storage
+++ b/.docker/Dockerfile.storage
@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md
similarity index 100%
rename from .github/ISSUE_TEMPLATE/bug_report.md
rename to .forgejo/ISSUE_TEMPLATE/bug_report.md
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.forgejo/ISSUE_TEMPLATE/config.yml
similarity index 100%
rename from .github/ISSUE_TEMPLATE/config.yml
rename to .forgejo/ISSUE_TEMPLATE/config.yml
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.forgejo/ISSUE_TEMPLATE/feature_request.md
similarity index 100%
rename from .github/ISSUE_TEMPLATE/feature_request.md
rename to .forgejo/ISSUE_TEMPLATE/feature_request.md
diff --git a/.github/logo.svg b/.forgejo/logo.svg
similarity index 100%
rename from .github/logo.svg
rename to .forgejo/logo.svg
diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml
index ce2d64dd9..d568b9607 100644
--- a/.forgejo/workflows/build.yml
+++ b/.forgejo/workflows/build.yml
@@ -1,6 +1,10 @@
name: Build
-on: [pull_request]
+on:
+ pull_request:
+ push:
+ branches:
+ - master
jobs:
build:
@@ -8,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.22', '1.23' ]
+ go_versions: [ '1.23', '1.24' ]
steps:
- uses: actions/checkout@v3
diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml
index 7c5af8410..190d7764a 100644
--- a/.forgejo/workflows/dco.yml
+++ b/.forgejo/workflows/dco.yml
@@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
- go-version: '1.22'
+ go-version: '1.24'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml
new file mode 100644
index 000000000..fe91d65f9
--- /dev/null
+++ b/.forgejo/workflows/oci-image.yml
@@ -0,0 +1,28 @@
+name: OCI image
+
+on:
+ push:
+ workflow_dispatch:
+
+jobs:
+ image:
+ name: Build container images
+ runs-on: docker
+ container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
+ steps:
+ - name: Clone git repo
+ uses: actions/checkout@v3
+
+ - name: Build OCI image
+ run: make images
+
+ - name: Push image to OCI registry
+ run: |
+ echo "$REGISTRY_PASSWORD" \
+ | docker login --username truecloudlab --password-stdin git.frostfs.info
+ make push-images
+ if: >-
+ startsWith(github.ref, 'refs/tags/v') &&
+ (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
+ env:
+ REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml
index 8b06a2fdf..c2e293175 100644
--- a/.forgejo/workflows/pre-commit.yml
+++ b/.forgejo/workflows/pre-commit.yml
@@ -1,5 +1,10 @@
name: Pre-commit hooks
-on: [pull_request]
+
+on:
+ pull_request:
+ push:
+ branches:
+ - master
jobs:
precommit:
@@ -16,7 +21,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: 1.23
+ go-version: 1.24
- name: Set up Python
run: |
apt update
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index 07ba5c268..f3f5432ce 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -1,5 +1,10 @@
name: Tests and linters
-on: [pull_request]
+
+on:
+ pull_request:
+ push:
+ branches:
+ - master
jobs:
lint:
@@ -11,7 +16,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.23'
+ go-version: '1.24'
cache: true
- name: Install linters
@@ -25,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.22', '1.23' ]
+ go_versions: [ '1.23', '1.24' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -48,7 +53,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.22'
+ go-version: '1.24'
cache: true
- name: Run tests
@@ -63,7 +68,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.23'
+ go-version: '1.24'
cache: true
- name: Install staticcheck
@@ -99,7 +104,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.23'
+ go-version: '1.24'
cache: true
- name: Install gofumpt
diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml
index 2951a8059..bc94792d8 100644
--- a/.forgejo/workflows/vulncheck.yml
+++ b/.forgejo/workflows/vulncheck.yml
@@ -1,5 +1,10 @@
name: Vulncheck
-on: [pull_request]
+
+on:
+ pull_request:
+ push:
+ branches:
+ - master
jobs:
vulncheck:
@@ -13,7 +18,8 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
- go-version: '1.23'
+ go-version: '1.24'
+ check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/.golangci.yml b/.golangci.yml
index 971f0d0e7..e3ec09f60 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,87 +1,107 @@
-# This file contains all available configuration options
-# with their default values.
-
-# options for analysis running
+version: "2"
run:
- # timeout for analysis, e.g. 30s, 5m, default is 1m
- timeout: 20m
-
- # include test files or not, default is true
tests: false
-
-# output configuration options
output:
- # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
formats:
- - format: tab
-
-# all available settings of specific linters
-linters-settings:
- exhaustive:
- # indicates that switch statements are to be considered exhaustive if a
- # 'default' case is present, even if all enum members aren't listed in the
- # switch
- default-signifies-exhaustive: true
- govet:
- # report about shadowed variables
- check-shadowing: false
- staticcheck:
- checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
- funlen:
- lines: 80 # default 60
- statements: 60 # default 40
- gocognit:
- min-complexity: 40 # default 30
- importas:
- no-unaliased: true
- no-extra-aliases: false
- alias:
- pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
- alias: objectSDK
- custom:
- truecloudlab-linters:
- path: bin/linters/external_linters.so
- original-url: git.frostfs.info/TrueCloudLab/linters.git
- settings:
- noliteral:
- target-methods : ["reportFlushError", "reportError"]
- disable-packages: ["codes", "err", "res","exec"]
- constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-
+ tab:
+ path: stdout
+ colors: false
linters:
+ default: none
enable:
- # mandatory linters
- - govet
- - revive
-
- # some default golangci-lint linters
- - errcheck
- - gosimple
- - godot
- - ineffassign
- - staticcheck
- - typecheck
- - unused
-
- # extra linters
- bidichk
- - durationcheck
- - exhaustive
- - copyloopvar
- - gofmt
- - goimports
- - misspell
- - predeclared
- - reassign
- - whitespace
- containedctx
+ - contextcheck
+ - copyloopvar
+ - durationcheck
+ - errcheck
+ - exhaustive
- funlen
- gocognit
- - contextcheck
+ - gocritic
+ - godot
- importas
- - truecloudlab-linters
+ - ineffassign
+ - intrange
+ - misspell
- perfsprint
- - testifylint
+ - predeclared
- protogetter
- disable-all: true
- fast: false
+ - reassign
+ - revive
+ - staticcheck
+ - testifylint
+ - truecloudlab-linters
+ - unconvert
+ - unparam
+ - unused
+ - usetesting
+ - whitespace
+ settings:
+ exhaustive:
+ default-signifies-exhaustive: true
+ funlen:
+ lines: 80
+ statements: 60
+ gocognit:
+ min-complexity: 40
+ gocritic:
+ disabled-checks:
+ - ifElseChain
+ importas:
+ alias:
+ - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+ alias: objectSDK
+ no-unaliased: true
+ no-extra-aliases: false
+ staticcheck:
+ checks:
+ - all
+ - -QF1002
+ unused:
+ field-writes-are-uses: false
+ exported-fields-are-used: false
+ local-variables-are-used: false
+ custom:
+ truecloudlab-linters:
+ path: bin/linters/external_linters.so
+ original-url: git.frostfs.info/TrueCloudLab/linters.git
+ settings:
+ noliteral:
+ constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
+ disable-packages:
+ - codes
+ - err
+ - res
+ - exec
+ target-methods:
+ - reportFlushError
+ - reportError
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gci
+ - gofmt
+ - goimports
+ settings:
+ gci:
+ sections:
+ - standard
+ - default
+ custom-order: true
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/.woodpecker/pre-commit.yml b/.woodpecker/pre-commit.yml
deleted file mode 100644
index bdf3402de..000000000
--- a/.woodpecker/pre-commit.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-pipeline:
- # Kludge for non-root containers under WoodPecker
- fix-ownership:
- image: alpine:latest
- commands: chown -R 1234:1234 .
-
- pre-commit:
- image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36
- commands:
- - export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)"
- - pre-commit run --hook-stage manual
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e4ba6a5d6..92c84ab16 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,30 @@ Changelog for FrostFS Node
### Removed
### Updated
+## [v0.44.0] - 2024-11-25 - Rongbuk
+
+### Added
+- Allow to prioritize nodes during GET traversal via attributes (#1439)
+- Add metrics for the frostfsid cache (#1464)
+- Customize constant attributes attached to every tracing span (#1488)
+- Manage additional keys in the `frostfsid` contract (#1505)
+- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519)
+
+### Changed
+- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396)
+- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515)
+
+### Fixed
+- Fix EC object search (#1408)
+- Fix EC object put when one of the nodes is unavailable (#1427)
+
+### Removed
+- Drop most of the eACL-related code (#1425)
+- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483)
+
+### Upgrading from v0.43.0
+The metabase schema has changed completely; a metabase resync is required.
+
## [v0.42.0]
### Added
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 000000000..d19c96a5c
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,3 @@
+.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
+.forgejo/.* @potyarkin
+Makefile @potyarkin
diff --git a/Makefile b/Makefile
index 2f29ac19c..575eaae6f 100755
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,6 @@
#!/usr/bin/make -f
SHELL = bash
+.SHELLFLAGS = -euo pipefail -c
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -7,16 +8,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
-GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.3
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
+GO_VERSION ?= 1.23
+LINT_VERSION ?= 2.0.2
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
PROTOC_VERSION ?= 25.0
-PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
+PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
PROTOC_OS_VERSION=linux-x86_64
endif
-STATICCHECK_VERSION ?= 2024.1.1
+STATICCHECK_VERSION ?= 2025.1.1
ARCH = amd64
BIN = bin
@@ -27,12 +28,6 @@ DIRS = $(BIN) $(RELEASE)
CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*)))
BINS = $(addprefix $(BIN)/, $(CMDS))
-# .deb package versioning
-OS_RELEASE = $(shell lsb_release -cs)
-PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
- sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
- sed "s/-/~/")-${OS_RELEASE}
-
OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
@@ -48,7 +43,7 @@ GOFUMPT_VERSION ?= v0.7.0
GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
-GOPLS_VERSION ?= v0.15.1
+GOPLS_VERSION ?= v0.17.1
GOPLS_DIR ?= $(abspath $(BIN))/gopls
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
GOPLS_TEMP_FILE := $(shell mktemp)
@@ -58,7 +53,7 @@ LOCODE_DB_PATH=$(abspath ./.cache/locode_db)
LOCODE_DB_VERSION=v0.4.0
.PHONY: help all images dep clean fmts fumpt imports test lint docker/lint
- prepare-release debpackage pre-commit unpre-commit
+ prepare-release pre-commit unpre-commit
# To build a specific binary, use its name prefixed with bin/ as a target
# For example `make bin/frostfs-node` will build only storage node binary
@@ -121,13 +116,13 @@ protoc:
# Install protoc
protoc-install:
@rm -rf $(PROTOBUF_DIR)
- @mkdir $(PROTOBUF_DIR)
+ @mkdir -p $(PROTOBUF_DIR)
@echo "⇒ Installing protoc... "
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
@echo "⇒ Instaling protogen FrostFS plugin..."
- @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
+ @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
# Build FrostFS component's docker image
image-%:
@@ -145,6 +140,15 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
+# Push FrostFS components' docker image to the registry
+push-image-%:
+ @echo "⇒ Publish FrostFS $* docker image "
+ @docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
+
+# Push all Docker images to the registry
+.PHONY: push-images
+push-images: push-image-storage push-image-ir push-image-cli push-image-adm
+
# Run `make %` in Golang container
docker/%:
docker run --rm -t \
@@ -166,7 +170,7 @@ imports:
# Install gofumpt
fumpt-install:
@rm -rf $(GOFUMPT_DIR)
- @mkdir $(GOFUMPT_DIR)
+ @mkdir -p $(GOFUMPT_DIR)
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
# Run gofumpt
@@ -183,21 +187,44 @@ test:
@echo "⇒ Running go test"
@GOFLAGS="$(GOFLAGS)" go test ./...
+# Install Gerrit commit-msg hook
+review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
+review-install:
+ @git config remote.review.url \
+ || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
+ @mkdir -p $(GIT_HOOK_DIR)/
+ @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
+ @chmod +x $(GIT_HOOK_DIR)/commit-msg
+ @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
+ @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
+
+# Create a PR in Gerrit
+review: BRANCH ?= master
+review:
+ @git push review HEAD:refs/for/$(BRANCH) \
+ --push-option r=e.stratonikov@yadro.com \
+ --push-option r=d.stepanov@yadro.com \
+ --push-option r=an.nikiforov@yadro.com \
+ --push-option r=a.arifullin@yadro.com \
+ --push-option r=ekaterina.lebedeva@yadro.com \
+ --push-option r=a.savchuk@yadro.com \
+ --push-option r=a.chuprov@yadro.com
+
# Run pre-commit
pre-commit-run:
@pre-commit run -a --hook-stage manual
# Install linters
-lint-install:
+lint-install: $(BIN)
@rm -rf $(OUTPUT_LINT_DIR)
- @mkdir $(OUTPUT_LINT_DIR)
+ @mkdir -p $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
- @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+ @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
@@ -209,7 +236,7 @@ lint:
# Install staticcheck
staticcheck-install:
@rm -rf $(STATICCHECK_DIR)
- @mkdir $(STATICCHECK_DIR)
+ @mkdir -p $(STATICCHECK_DIR)
@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
# Run staticcheck
@@ -222,7 +249,7 @@ staticcheck-run:
# Install gopls
gopls-install:
@rm -rf $(GOPLS_DIR)
- @mkdir $(GOPLS_DIR)
+ @mkdir -p $(GOPLS_DIR)
@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
# Run gopls
@@ -263,19 +290,6 @@ clean:
rm -rf $(BIN)
rm -rf $(RELEASE)
-# Package for Debian
-debpackage:
- dch -b --package frostfs-node \
- --controlmaint \
- --newversion $(PKG_VERSION) \
- --distribution $(OS_RELEASE) \
- "Please see CHANGELOG.md for code changes for $(VERSION)"
- dpkg-buildpackage --no-sign -b
-
-# Cleanup deb package build directories
-debclean:
- dh clean
-
# Download locode database
locode-download:
mkdir -p $(TMP_DIR)
@@ -289,10 +303,12 @@ env-up: all
echo "Frostfs contracts not found"; exit 1; \
fi
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
- ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
- ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
- ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
- ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
+ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
+ --storage-wallet ./dev/storage/wallet01.json \
+ --storage-wallet ./dev/storage/wallet02.json \
+ --storage-wallet ./dev/storage/wallet03.json \
+ --storage-wallet ./dev/storage/wallet04.json
+
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
make locode-download; \
fi
@@ -301,7 +317,6 @@ env-up: all
# Shutdown dev environment
env-down:
- docker compose -f dev/docker-compose.yml down
- docker volume rm -f frostfs-node_neo-go
+ docker compose -f dev/docker-compose.yml down -v
rm -rf ./$(TMP_DIR)/state
rm -rf ./$(TMP_DIR)/storage
diff --git a/README.md b/README.md
index 47d812b18..0109ed0e5 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-<img src="./.github/logo.svg" alt="FrostFS">
+<img src="./.forgejo/logo.svg" alt="FrostFS">
@@ -98,7 +98,7 @@ See `frostfs-contract`'s README.md for build instructions.
4. To create container and put object into it run (container and object IDs will be different):
```
-./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --basic-acl public-read-write --await
+./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await
Enter password > <- press ENTER, there is no password for the wallet
CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
diff --git a/VERSION b/VERSION
index 01efe7f3a..9052dab96 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v0.42.0
+v0.44.0
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index 81395edb0..f194e97f5 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -16,10 +16,18 @@ const (
EndpointFlagDesc = "N3 RPC node endpoint"
EndpointFlagShort = "r"
+ WalletPath = "wallet"
+ WalletPathShorthand = "w"
+ WalletPathUsage = "Path to the wallet"
+
AlphabetWalletsFlag = "alphabet-wallets"
AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
+ AdminWalletPath = "wallet-admin"
+ AdminWalletUsage = "Path to the admin wallet"
+
LocalDumpFlag = "local-dump"
+ ProtoConfigPath = "protocol"
ContractsInitFlag = "contracts"
ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)"
ContractsURLFlag = "contracts-url"
diff --git a/cmd/frostfs-adm/internal/modules/config/config.go b/cmd/frostfs-adm/internal/modules/config/config.go
index a98245d01..69153f0d7 100644
--- a/cmd/frostfs-adm/internal/modules/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/config/config.go
@@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")
var i innerring.GlagoliticLetter
- for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {
+ for i = range innerring.GlagoliticLetter(credSize) {
tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go
new file mode 100644
index 000000000..d67b70d2a
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/root.go
@@ -0,0 +1,15 @@
+package maintenance
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
+ "github.com/spf13/cobra"
+)
+
+var RootCmd = &cobra.Command{
+ Use: "maintenance",
+ Short: "Section for maintenance commands",
+}
+
+func init() {
+ RootCmd.AddCommand(zombie.Cmd)
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
new file mode 100644
index 000000000..1b66889aa
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
@@ -0,0 +1,70 @@
+package zombie
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+ "os"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/nspcc-dev/neo-go/cli/flags"
+ "github.com/nspcc-dev/neo-go/cli/input"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
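+// getPrivateKey resolves the signing key: the node config key by default,
+// otherwise a binary key file or a wallet file passed via the wallet flag.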
+func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
+ keyDesc := viper.GetString(walletFlag)
+ if keyDesc == "" {
+ return &nodeconfig.Key(appCfg).PrivateKey
+ }
+ data, err := os.ReadFile(keyDesc)
+ commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
+
+ priv, err := keys.NewPrivateKeyFromBytes(data)
+ if err != nil {
+ w, err := wallet.NewWalletFromFile(keyDesc)
+ commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
+ return fromWallet(cmd, w, viper.GetString(addressFlag))
+ }
+ return &priv.PrivateKey
+}
+
+func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
+ var (
+ addr util.Uint160
+ err error
+ )
+
+ if addrStr == "" {
+ addr = w.GetChangeAddress()
+ } else {
+ addr, err = flags.ParseAddress(addrStr)
+ commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
+ }
+
+ acc := w.GetAccount(addr)
+ if acc == nil {
+ commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
+ }
+
+ pass, err := getPassword()
+ commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
+
+ commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
+
+ return &acc.PrivateKey().PrivateKey
+}
+
+func getPassword() (string, error) {
+ // this check allows empty passwords
+ if viper.IsSet("password") {
+ return viper.GetString("password"), nil
+ }
+
+ return input.ReadPassword("Enter password > ")
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
new file mode 100644
index 000000000..f73f33db9
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
@@ -0,0 +1,31 @@
+package zombie
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
+func list(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ storageEngine := newEngine(cmd, appCfg)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+ var containerID *cid.ID
+ if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
+ containerID = &cid.ID{}
+ commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
+ }
+
+ commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
+ if containerID != nil && a.Container() != *containerID {
+ return nil
+ }
+ cmd.Println(a.EncodeToString())
+ return nil
+ }))
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
new file mode 100644
index 000000000..cd3a64499
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
@@ -0,0 +1,46 @@
+package zombie
+
+import (
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "github.com/spf13/cobra"
+)
+
+func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
+ addresses := morphconfig.RPCEndpoint(appCfg)
+ if len(addresses) == 0 {
+ commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
+ }
+ key := nodeconfig.Key(appCfg)
+ cli, err := client.New(cmd.Context(),
+ key,
+ client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
+ client.WithEndpoints(addresses...),
+ client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
+ )
+ commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
+ return cli
+}
+
+func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
+ hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
+ commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
+ cc, err := cntClient.NewFromMorph(morph, hs, 0)
+ commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
+ return cc
+}
+
+func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
+ hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
+ commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
+ cli, err := netmapClient.NewFromMorph(morph, hs, 0)
+ commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
+ return cli
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
new file mode 100644
index 000000000..27f83aec7
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
@@ -0,0 +1,154 @@
+package zombie
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
+type quarantine struct {
+ // mtx protects current field.
+ mtx sync.Mutex
+ current int
+ trees []*fstree.FSTree
+}
+
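+// createQuarantine places a quarantine directory under the common path prefix
+// of each shard's substorages.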
+func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
+ var paths []string
+ for _, sh := range engineInfo.Shards {
+ var storagePaths []string
+ for _, st := range sh.BlobStorInfo.SubStorages {
+ storagePaths = append(storagePaths, st.Path)
+ }
+ if len(storagePaths) == 0 {
+ continue
+ }
+ paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
+ }
+ q, err := newQuarantine(paths)
+ commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
+ return q
+}
+
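+// commonPath returns the longest common prefix of the given paths.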
+func commonPath(paths []string) string {
+ if len(paths) == 0 {
+ return ""
+ }
+ if len(paths) == 1 {
+ return paths[0]
+ }
+ minLen := math.MaxInt
+ for _, p := range paths {
+ if len(p) < minLen {
+ minLen = len(p)
+ }
+ }
+
+ var sb strings.Builder
+ for i := range minLen {
+ for _, path := range paths[1:] {
+ if paths[0][i] != path[i] {
+ return sb.String()
+ }
+ }
+ sb.WriteByte(paths[0][i])
+ }
+ return sb.String()
+}
+
+func newQuarantine(paths []string) (*quarantine, error) {
+ var q quarantine
+ for i := range paths {
+ f := fstree.New(
+ fstree.WithDepth(1),
+ fstree.WithDirNameLen(1),
+ fstree.WithPath(paths[i]),
+ fstree.WithPerm(os.ModePerm),
+ )
+ if err := f.Open(mode.ComponentReadWrite); err != nil {
+ return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
+ }
+ if err := f.Init(); err != nil {
+ return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
+ }
+ q.trees = append(q.trees, f)
+ }
+ return &q, nil
+}
+
+func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
+ for i := range q.trees {
+ res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
+ if err != nil {
+ continue
+ }
+ return res.Object, nil
+ }
+ return nil, &apistatus.ObjectNotFound{}
+}
+
+func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
+ for i := range q.trees {
+ _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
+ if err != nil {
+ continue
+ }
+ return nil
+ }
+ return &apistatus.ObjectNotFound{}
+}
+
+func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
+ data, err := obj.Marshal()
+ if err != nil {
+ return err
+ }
+
+ var prm common.PutPrm
+ prm.Address = objectcore.AddressOf(obj)
+ prm.Object = obj
+ prm.RawData = data
+
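+	// Pick the next tree round-robin so objects are spread across quarantine directories.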
+ q.mtx.Lock()
+ current := q.current
+ q.current = (q.current + 1) % len(q.trees)
+ q.mtx.Unlock()
+
+ _, err = q.trees[current].Put(ctx, prm)
+ return err
+}
+
+func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
+ var prm common.IteratePrm
+ prm.Handler = func(elem common.IterationElement) error {
+ return f(elem.Address)
+ }
+ for i := range q.trees {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ _, err := q.trees[i].Iterate(ctx, prm)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
new file mode 100644
index 000000000..0b8f2f172
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
@@ -0,0 +1,55 @@
+package zombie
+
+import (
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
+func remove(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ storageEngine := newEngine(cmd, appCfg)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+
+ var containerID cid.ID
+ cidStr, _ := cmd.Flags().GetString(cidFlag)
+ commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
+
+ var objectID *oid.ID
+ oidStr, _ := cmd.Flags().GetString(oidFlag)
+ if oidStr != "" {
+ objectID = &oid.ID{}
+ commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
+ }
+
+ if objectID != nil {
+ var addr oid.Address
+ addr.SetContainer(containerID)
+ addr.SetObject(*objectID)
+ removeObject(cmd, q, addr)
+ } else {
+ commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
+ if addr.Container() != containerID {
+ return nil
+ }
+ removeObject(cmd, q, addr)
+ return nil
+ }))
+ }
+}
+
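+// removeObject deletes an object from quarantine; an already missing object is not an error.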
+func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
+ err := q.Delete(cmd.Context(), addr)
+ if errors.Is(err, new(apistatus.ObjectNotFound)) {
+ return
+ }
+ commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
new file mode 100644
index 000000000..f179c7c2d
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
@@ -0,0 +1,69 @@
+package zombie
+
+import (
+ "crypto/sha256"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
+func restore(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ storageEngine := newEngine(cmd, appCfg)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+ morphClient := createMorphClient(cmd, appCfg)
+ cnrCli := createContainerClient(cmd, morphClient)
+
+ var containerID cid.ID
+ cidStr, _ := cmd.Flags().GetString(cidFlag)
+ commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
+
+ var objectID *oid.ID
+ oidStr, _ := cmd.Flags().GetString(oidFlag)
+ if oidStr != "" {
+ objectID = &oid.ID{}
+ commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
+ }
+
+ if objectID != nil {
+ var addr oid.Address
+ addr.SetContainer(containerID)
+ addr.SetObject(*objectID)
+ restoreObject(cmd, storageEngine, q, addr, cnrCli)
+ } else {
+ commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
+ if addr.Container() != containerID {
+ return nil
+ }
+ restoreObject(cmd, storageEngine, q, addr, cnrCli)
+ return nil
+ }))
+ }
+}
+
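+// restoreObject puts an object back into the storage engine and drops it from quarantine.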
+func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
+ obj, err := q.Get(cmd.Context(), addr)
+ commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
+ rawCID := make([]byte, sha256.Size)
+
+ cid := addr.Container()
+ cid.Encode(rawCID)
+ cnr, err := cnrCli.Get(cmd.Context(), rawCID)
+ commonCmd.ExitOnErr(cmd, "get container: %w", err)
+
+ putPrm := engine.PutPrm{
+ Object: obj,
+ IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
+ }
+ commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
+ commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
new file mode 100644
index 000000000..c8fd9e5e5
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
@@ -0,0 +1,123 @@
+package zombie
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ flagBatchSize = "batch-size"
+ flagBatchSizeUsage = "Objects iteration batch size"
+ cidFlag = "cid"
+ cidFlagUsage = "Container ID"
+ oidFlag = "oid"
+ oidFlagUsage = "Object ID"
+ walletFlag = "wallet"
+ walletFlagShorthand = "w"
+ walletFlagUsage = "Path to the wallet or binary key"
+ addressFlag = "address"
+ addressFlagUsage = "Address of wallet account"
+ moveFlag = "move"
+ moveFlagUsage = "Move objects from storage engine to quarantine"
+)
+
+var (
+ Cmd = &cobra.Command{
+ Use: "zombie",
+ Short: "Zombie objects related commands",
+ }
+ scanCmd = &cobra.Command{
+ Use: "scan",
+ Short: "Scan storage engine for zombie objects and move them to quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
+ _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
+ _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
+ _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
+ },
+ Run: scan,
+ }
+ listCmd = &cobra.Command{
+ Use: "list",
+ Short: "List zombie objects from quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
+ },
+ Run: list,
+ }
+ restoreCmd = &cobra.Command{
+ Use: "restore",
+ Short: "Restore zombie objects from quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
+ _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
+ },
+ Run: restore,
+ }
+ removeCmd = &cobra.Command{
+ Use: "remove",
+ Short: "Remove zombie objects from quarantine",
+ Long: "",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
+ _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
+ _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
+ _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
+ },
+ Run: remove,
+ }
+)
+
+func init() {
+ initScanCmd()
+ initListCmd()
+ initRestoreCmd()
+ initRemoveCmd()
+}
+
+func initScanCmd() {
+ Cmd.AddCommand(scanCmd)
+
+ scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
+ scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
+ scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
+ scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
+}
+
+func initListCmd() {
+ Cmd.AddCommand(listCmd)
+
+ listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ listCmd.Flags().String(cidFlag, "", cidFlagUsage)
+}
+
+func initRestoreCmd() {
+ Cmd.AddCommand(restoreCmd)
+
+ restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
+ restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
+}
+
+func initRemoveCmd() {
+ Cmd.AddCommand(removeCmd)
+
+ removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
+ removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
+ removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
+ removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
new file mode 100644
index 000000000..268ec4911
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
@@ -0,0 +1,281 @@
+package zombie
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
+ clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+ "golang.org/x/sync/errgroup"
+)
+
+func scan(cmd *cobra.Command, _ []string) {
+ configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
+ configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
+ if batchSize == 0 {
+ commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
+ }
+ move, _ := cmd.Flags().GetBool(moveFlag)
+
+ storageEngine := newEngine(cmd, appCfg)
+ morphClient := createMorphClient(cmd, appCfg)
+ cnrCli := createContainerClient(cmd, morphClient)
+ nmCli := createNetmapClient(cmd, morphClient)
+ q := createQuarantine(cmd, storageEngine.DumpInfo())
+ pk := getPrivateKey(cmd, appCfg)
+
+ epoch, err := nmCli.Epoch(cmd.Context())
+ commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
+
+ nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
+ commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
+
+ cmd.Printf("Epoch: %d\n", nm.Epoch())
+ cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
+
+ ps := &processStatus{
+ statusCount: make(map[status]uint64),
+ }
+
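+	// Report progress once a second until the scan goroutine closes stopCh.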
+ stopCh := make(chan struct{})
+ start := time.Now()
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ tick := time.NewTicker(time.Second)
+ defer tick.Stop()
+ for {
+ select {
+ case <-cmd.Context().Done():
+ return
+ case <-stopCh:
+ return
+ case <-tick.C:
+ fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
+ }
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
+ close(stopCh)
+ }()
+ wg.Wait()
+ commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
+
+ cmd.Println()
+ cmd.Println("Status description:")
+ cmd.Println("undefined -- nothing is clear")
+ cmd.Println("found -- object is found in cluster")
+ cmd.Println("quarantine -- object is not found in cluster")
+ cmd.Println()
+ for status, count := range ps.statusCount {
+ cmd.Printf("Status: %s, Count: %d\n", status, count)
+ }
+}
+
+type status string
+
+const (
+ statusUndefined status = "undefined"
+ statusFound status = "found"
+ statusQuarantine status = "quarantine"
+)
+
+func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
+ rawCID := make([]byte, sha256.Size)
+ cid := obj.Address.Container()
+ cid.Encode(rawCID)
+
+ cnr, err := cnrCli.Get(ctx, rawCID)
+ if err != nil {
+ var errContainerNotFound *apistatus.ContainerNotFound
+ if errors.As(err, &errContainerNotFound) {
+ // Policer will deal with this object.
+ return statusFound, nil
+ }
+ return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
+ }
+ nm, err := nmCli.NetMap(ctx)
+ if err != nil {
+ return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
+ }
+
+ nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
+ if err != nil {
+ // Not enough nodes, check all netmap nodes.
+ nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
+ }
+
+ objID := obj.Address.Object()
+ cnrID := obj.Address.Container()
+ local := true
+ raw := false
+ if obj.ECInfo != nil {
+ objID = obj.ECInfo.ParentID
+ local = false
+ raw = true
+ }
+ prm := clientSDK.PrmObjectHead{
+ ObjectID: &objID,
+ ContainerID: &cnrID,
+ Local: local,
+ Raw: raw,
+ }
+
+ var ni clientCore.NodeInfo
+ for i := range nodes {
+ for j := range nodes[i] {
+ if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
+ return statusUndefined, fmt.Errorf("parse node info: %w", err)
+ }
+ c, err := cc.Get(ni)
+ if err != nil {
+ continue
+ }
+ res, err := c.ObjectHead(ctx, prm)
+ if err != nil {
+ var errECInfo *objectSDK.ECInfoError
+ if raw && errors.As(err, &errECInfo) {
+ return statusFound, nil
+ }
+ continue
+ }
+ if err := apistatus.ErrFromStatus(res.Status()); err != nil {
+ continue
+ }
+ return statusFound, nil
+ }
+ }
+
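+	// With a REP 1 policy the local copy may be the only one in the cluster, so treat it as found.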
+ if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
+ return statusFound, nil
+ }
+ return statusQuarantine, nil
+}
+
+func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
+ appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
+) error {
+ cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
+ DialTimeout: apiclientconfig.DialTimeout(appCfg),
+ StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
+ ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
+ Key: pk,
+ AllowExternal: apiclientconfig.AllowExternal(appCfg),
+ })
+ ctx := cmd.Context()
+
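+	// Page through all objects via the engine cursor; each batch is checked concurrently below.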
+ var cursor *engine.Cursor
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var prm engine.ListWithCursorPrm
+ prm.WithCursor(cursor)
+ prm.WithCount(batchSize)
+
+ res, err := storageEngine.ListWithCursor(ctx, prm)
+ if err != nil {
+ if errors.Is(err, engine.ErrEndOfListing) {
+ return nil
+ }
+ return fmt.Errorf("list with cursor: %w", err)
+ }
+
+ cursor = res.Cursor()
+ addrList := res.AddressList()
+ eg, egCtx := errgroup.WithContext(ctx)
+ eg.SetLimit(int(batchSize))
+
+ for i := range addrList {
+ addr := addrList[i]
+ eg.Go(func() error {
+ result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
+ if err != nil {
+ return fmt.Errorf("check object %s status: %w", addr.Address, err)
+ }
+ ps.add(result)
+
+ if !move && result == statusQuarantine {
+ cmd.Println(addr)
+ return nil
+ }
+
+ if result == statusQuarantine {
+ return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return fmt.Errorf("process objects batch: %w", err)
+ }
+ }
+}
+
+func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
+ var getPrm engine.GetPrm
+ getPrm.WithAddress(addr)
+ res, err := storageEngine.Get(ctx, getPrm)
+ if err != nil {
+ return fmt.Errorf("get object %s from storage engine: %w", addr, err)
+ }
+
+ if err := q.Put(ctx, res.Object()); err != nil {
+ return fmt.Errorf("put object %s to quarantine: %w", addr, err)
+ }
+
+ var delPrm engine.DeletePrm
+ delPrm.WithForceRemoval()
+ delPrm.WithAddress(addr)
+
+ if err = storageEngine.Delete(ctx, delPrm); err != nil {
+ return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
+ }
+ return nil
+}
+
+type processStatus struct {
+ guard sync.RWMutex
+ statusCount map[status]uint64
+ count uint64
+}
+
+func (s *processStatus) add(st status) {
+ s.guard.Lock()
+ defer s.guard.Unlock()
+ s.statusCount[st]++
+ s.count++
+}
+
+func (s *processStatus) total() uint64 {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+ return s.count
+}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
new file mode 100644
index 000000000..5be34d502
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
@@ -0,0 +1,201 @@
+package zombie
+
+import (
+ "context"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
+ shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
+ fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/panjf2000/ants/v2"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
+ ngOpts := storageEngineOptions(c)
+ shardOpts := shardOptions(cmd, c)
+ e := engine.New(ngOpts...)
+ for _, opts := range shardOpts {
+ _, err := e.AddShard(cmd.Context(), opts...)
+ commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
+ }
+ commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
+ commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
+ return e
+}
+
+func storageEngineOptions(c *config.Config) []engine.Option {
+ return []engine.Option{
+ engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
+ engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
+ }
+}
+
+func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
+ var result [][]shard.Option
+ err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
+ result = append(result, getShardOpts(cmd, c, sh))
+ return nil
+ })
+ commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
+ return result
+}
+
+func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
+ wc, wcEnabled := getWriteCacheOpts(sh)
+ return []shard.Option{
+ shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ shard.WithRefillMetabase(sh.RefillMetabase()),
+ shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
+ shard.WithMode(sh.Mode()),
+ shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
+ shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
+ shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
+ shard.WithWriteCache(wcEnabled),
+ shard.WithWriteCacheOptions(wc),
+ shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
+ shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
+ shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
+ shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
+ return pool
+ }),
+ shard.WithLimiter(qos.NewNoopLimiter()),
+ }
+}
+
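+// getWriteCacheOpts returns write-cache options for the shard and reports
+// whether the cache is enabled.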
+func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
+ if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
+ var result []writecache.Option
+ result = append(result,
+ writecache.WithPath(wc.Path()),
+ writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
+ writecache.WithMaxObjectSize(wc.MaxObjectSize()),
+ writecache.WithFlushWorkersCount(wc.WorkerCount()),
+ writecache.WithMaxCacheSize(wc.SizeLimit()),
+ writecache.WithMaxCacheCount(wc.CountLimit()),
+ writecache.WithNoSync(wc.NoSync()),
+ writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ writecache.WithQoSLimiter(qos.NewNoopLimiter()),
+ )
+ return result, true
+ }
+ return nil, false
+}
+
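+// getPiloramaOpts returns pilorama options; the result is empty unless the
+// tree service is enabled in the config.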
+func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
+ var piloramaOpts []pilorama.Option
+ if config.BoolSafe(c.Sub("tree"), "enabled") {
+ pr := sh.Pilorama()
+ piloramaOpts = append(piloramaOpts,
+ pilorama.WithPath(pr.Path()),
+ pilorama.WithPerm(pr.Perm()),
+ pilorama.WithNoSync(pr.NoSync()),
+ pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
+ pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
+ )
+ }
+ return piloramaOpts
+}
+
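+// getMetabaseOpts returns metabase options with a short BoltDB open timeout
+// and a stub epoch state.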
+func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
+ return []meta.Option{
+ meta.WithPath(sh.Metabase().Path()),
+ meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
+ meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
+ meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
+ meta.WithBoltDBOptions(&bbolt.Options{
+ Timeout: 100 * time.Millisecond,
+ }),
+ meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ meta.WithEpochState(&epochState{}),
+ }
+}
+
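+// getBlobstorOpts returns blobstor options: compression settings and the
+// sub-storage list derived from the shard config.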
+func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
+ result := []blobstor.Option{
+ blobstor.WithCompression(sh.Compression()),
+ blobstor.WithStorages(getSubStorages(ctx, sh)),
+ blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ }
+
+ return result
+}
+
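+// getSubStorages builds the blobstor sub-storages: objects below the shard's
+// small-size limit go to the blobovnicza tree, everything else to the FSTree.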
+func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
+ var ss []blobstor.SubStorage
+ for _, storage := range sh.BlobStor().Storages() {
+ switch storage.Type() {
+ case blobovniczatree.Type:
+ sub := blobovniczaconfig.From((*config.Config)(storage))
+ blobTreeOpts := []blobovniczatree.Option{
+ blobovniczatree.WithRootPath(storage.Path()),
+ blobovniczatree.WithPermissions(storage.Perm()),
+ blobovniczatree.WithBlobovniczaSize(sub.Size()),
+ blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
+ blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
+ blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
+ blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
+ blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
+ blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
+ blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
+ blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
+ }
+
+ ss = append(ss, blobstor.SubStorage{
+ Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
+ return uint64(len(data)) < sh.SmallSizeLimit()
+ },
+ })
+ case fstree.Type:
+ sub := fstreeconfig.From((*config.Config)(storage))
+ fstreeOpts := []fstree.Option{
+ fstree.WithPath(storage.Path()),
+ fstree.WithPerm(storage.Perm()),
+ fstree.WithDepth(sub.Depth()),
+ fstree.WithNoSync(sub.NoSync()),
+ fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
+ }
+
+ ss = append(ss, blobstor.SubStorage{
+ Storage: fstree.New(fstreeOpts...),
+ Policy: func(_ *objectSDK.Object, _ []byte) bool {
+ return true
+ },
+ })
+ default:
+ // should never happen: unknown storage types
+ // are rejected when the config is read
+ }
+ }
+ return ss
+}
+
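+// epochState is a stub epoch source required by the metabase; it always
+// reports epoch 0.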
+type epochState struct{}
+
+func (epochState) CurrentEpoch() uint64 {
+ return 0
+}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index 96cb62f10..c0c290c5e 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -1,6 +1,7 @@
package metabase
import (
+ "context"
"errors"
"fmt"
"sync"
@@ -10,19 +11,25 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
const (
- pathFlag = "path"
noCompactFlag = "no-compact"
)
-var errNoPathsFound = errors.New("no metabase paths found")
-
-var path string
+var (
+ errNoPathsFound = errors.New("no metabase paths found")
+ errNoMorphEndpointsFound = errors.New("no morph endpoints found")
+ errUpgradeFailed = errors.New("upgrade failed")
+)
var UpgradeCmd = &cobra.Command{
Use: "upgrade",
@@ -39,17 +46,10 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
- noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
- var paths []string
- if path != "" {
- paths = append(paths, path)
- }
appCfg := config.New(configFile, configDir, config.EnvPrefix)
- if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
- paths = append(paths, sc.Metabase().Path())
- return nil
- }); err != nil {
- return fmt.Errorf("failed to get metabase paths: %w", err)
+ paths, err := getMetabasePaths(appCfg)
+ if err != nil {
+ return err
}
if len(paths) == 0 {
return errNoPathsFound
@@ -58,6 +58,16 @@ func upgrade(cmd *cobra.Command, _ []string) error {
for i, path := range paths {
cmd.Println(i+1, ":", path)
}
+ mc, err := createMorphClient(cmd.Context(), appCfg)
+ if err != nil {
+ return err
+ }
+ defer mc.Close()
+ civ, err := createContainerInfoProvider(mc)
+ if err != nil {
+ return err
+ }
+ noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
result := make(map[string]bool)
var resultGuard sync.Mutex
eg, ctx := errgroup.WithContext(cmd.Context())
@@ -65,7 +75,7 @@ func upgrade(cmd *cobra.Command, _ []string) error {
eg.Go(func() error {
var success bool
cmd.Println("upgrading metabase", path, "...")
- if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
+ if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) {
cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
}); err != nil {
cmd.Println("error: failed to upgrade metabase", path, ":", err)
@@ -82,18 +92,65 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil {
return err
}
+ allSuccess := true
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
+ allSuccess = false
}
}
- return nil
+ if allSuccess {
+ return nil
+ }
+ return errUpgradeFailed
+}
+
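+// getMetabasePaths collects the metabase paths of all shards in the config.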
+func getMetabasePaths(appCfg *config.Config) ([]string, error) {
+ var paths []string
+ if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
+ paths = append(paths, sc.Metabase().Path())
+ return nil
+ }); err != nil {
+ return nil, fmt.Errorf("get metabase paths: %w", err)
+ }
+ return paths, nil
+}
+
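+// createMorphClient dials the morph chain using the endpoints, key and
+// timeouts from the node config.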
+func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) {
+ addresses := morphconfig.RPCEndpoint(appCfg)
+ if len(addresses) == 0 {
+ return nil, errNoMorphEndpointsFound
+ }
+ key := nodeconfig.Key(appCfg)
+ cli, err := client.New(ctx,
+ key,
+ client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
+ client.WithEndpoints(addresses...),
+ client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("create morph client:%w", err)
+ }
+ return cli, nil
+}
+
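+// createContainerInfoProvider resolves the container contract via NNS and
+// wraps it as a container info source.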
+func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) {
+ sh, err := cli.NNSContractAddress(client.NNSContainerContractName)
+ if err != nil {
+ return nil, fmt.Errorf("resolve container contract hash: %w", err)
+ }
+ cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
+ if err != nil {
+ return nil, fmt.Errorf("create morph container client: %w", err)
+ }
+ return container.NewInfoProvider(func() (container.Source, error) {
+ return morphcontainer.AsContainerSource(cc), nil
+ }), nil
}
func initUpgradeCommand() {
flags := UpgradeCmd.Flags()
- flags.StringVar(&path, pathFlag, "", "Path to metabase file")
flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
index 077e03737..1960faab4 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
@@ -5,35 +5,19 @@ import (
"encoding/json"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
- jsonFlag = "json"
- jsonFlagDesc = "Output rule chains in JSON format"
- chainIDFlag = "chain-id"
- chainIDDesc = "Rule chain ID"
- ruleFlag = "rule"
- ruleFlagDesc = "Rule chain in text format"
- pathFlag = "path"
- pathFlagDesc = "path to encoded chain in JSON or binary format"
- targetNameFlag = "target-name"
- targetNameDesc = "Resource name in APE resource name format"
- targetTypeFlag = "target-type"
- targetTypeDesc = "Resource type(container/namespace)"
- addrAdminFlag = "addr"
- addrAdminDesc = "The address of the admins wallet"
- chainNameFlag = "chain-name"
- chainNameFlagDesc = "Chain name(ingress|s3)"
+ jsonFlag = "json"
+ jsonFlagDesc = "Output rule chains in JSON format"
+ addrAdminFlag = "addr"
+ addrAdminDesc = "The address of the admins wallet"
)
var (
@@ -101,17 +85,17 @@ func initAddRuleChainCmd() {
addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- addRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
- _ = addRuleChainCmd.MarkFlagRequired(targetTypeFlag)
- addRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
- _ = addRuleChainCmd.MarkFlagRequired(targetNameFlag)
+ addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
- addRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
- _ = addRuleChainCmd.MarkFlagRequired(chainIDFlag)
- addRuleChainCmd.Flags().StringArray(ruleFlag, []string{}, ruleFlagDesc)
- addRuleChainCmd.Flags().String(pathFlag, "", pathFlagDesc)
- addRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
- addRuleChainCmd.MarkFlagsMutuallyExclusive(ruleFlag, pathFlag)
+ addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
+ addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
+ addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
+ addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
+ addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag)
}
func initRemoveRuleChainCmd() {
@@ -120,26 +104,25 @@ func initRemoveRuleChainCmd() {
removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- removeRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(targetTypeFlag)
- removeRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(targetNameFlag)
- removeRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
- removeRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
+ removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
+ removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
+ removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target")
- removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, chainIDFlag)
+ removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag)
}
func initListRuleChainsCmd() {
Cmd.AddCommand(listRuleChainsCmd)
listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listRuleChainsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
- _ = listRuleChainsCmd.MarkFlagRequired(targetTypeFlag)
- listRuleChainsCmd.Flags().String(targetNameFlag, "", targetNameDesc)
- _ = listRuleChainsCmd.MarkFlagRequired(targetNameFlag)
+ listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
+ _ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc)
- listRuleChainsCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
+ listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
}
func initSetAdminCmd() {
@@ -161,15 +144,15 @@ func initListTargetsCmd() {
Cmd.AddCommand(listTargetsCmd)
listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listTargetsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
- _ = listTargetsCmd.MarkFlagRequired(targetTypeFlag)
+ listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
+ _ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
}
func addRuleChain(cmd *cobra.Command, _ []string) {
- chain := parseChain(cmd)
+ chain := apeCmd.ParseChain(cmd)
target := parseTarget(cmd)
pci, ac := newPolicyContractInterface(cmd)
- h, vub, err := pci.AddMorphRuleChain(parseChainName(cmd), target, chain)
+ h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err)
@@ -181,14 +164,14 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
pci, ac := newPolicyContractInterface(cmd)
removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag)
if removeAll {
- h, vub, err := pci.RemoveMorphRuleChainsByTarget(parseChainName(cmd), target)
+ h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
cmd.Println("All chains for target removed successfully")
} else {
- chainID := parseChainID(cmd)
- h, vub, err := pci.RemoveMorphRuleChain(parseChainName(cmd), target, chainID)
+ chainID := apeCmd.ParseChainID(cmd)
+ h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
@@ -199,7 +182,7 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
func listRuleChains(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
pci, _ := newPolicyContractReaderInterface(cmd)
- chains, err := pci.ListMorphRuleChains(parseChainName(cmd), target)
+ chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target)
commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err)
if len(chains) == 0 {
return
@@ -210,14 +193,14 @@ func listRuleChains(cmd *cobra.Command, _ []string) {
prettyJSONFormat(cmd, chains)
} else {
for _, c := range chains {
- parseutil.PrintHumanReadableAPEChain(cmd, c)
+ apeCmd.PrintHumanReadableAPEChain(cmd, c)
}
}
}
func setAdmin(cmd *cobra.Command, _ []string) {
s, _ := cmd.Flags().GetString(addrAdminFlag)
- addr, err := util.Uint160DecodeStringLE(s)
+ addr, err := address.StringToUint160(s)
commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
pci, ac := newPolicyContractInterface(cmd)
h, vub, err := pci.SetAdmin(addr)
@@ -231,12 +214,11 @@ func getAdmin(cmd *cobra.Command, _ []string) {
pci, _ := newPolicyContractReaderInterface(cmd)
addr, err := pci.GetAdmin()
commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
- cmd.Println(addr.StringLE())
+ cmd.Println(address.Uint160ToString(addr))
}
func listTargets(cmd *cobra.Command, _ []string) {
- typ, err := parseTargetType(cmd)
- commonCmd.ExitOnErr(cmd, "parse target type error: %w", err)
+ typ := apeCmd.ParseTargetType(cmd)
pci, inv := newPolicyContractReaderInterface(cmd)
sid, it, err := pci.ListTargetsIterator(typ)
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
index d4aedda2e..3c332c3f0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
@@ -2,13 +2,14 @@ package ape
import (
"errors"
- "strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -18,90 +19,29 @@ import (
"github.com/spf13/viper"
)
-const (
- ingress = "ingress"
- s3 = "s3"
-)
-
-var mChainName = map[string]apechain.Name{
- ingress: apechain.Ingress,
- s3: apechain.S3,
-}
-
-var (
- errUnknownTargetType = errors.New("unknown target type")
- errChainIDCannotBeEmpty = errors.New("chain id cannot be empty")
- errRuleIsNotParsed = errors.New("rule is not passed")
- errUnsupportedChainName = errors.New("unsupported chain name")
-)
+var errUnknownTargetType = errors.New("unknown target type")
func parseTarget(cmd *cobra.Command) policyengine.Target {
- name, _ := cmd.Flags().GetString(targetNameFlag)
- typ, err := parseTargetType(cmd)
-
- // interpret "root" namespace as empty
- if typ == policyengine.Namespace && name == "root" {
- name = ""
- }
-
- commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
-
- return policyengine.Target{
- Name: name,
- Type: typ,
- }
-}
-
-func parseTargetType(cmd *cobra.Command) (policyengine.TargetType, error) {
- typ, _ := cmd.Flags().GetString(targetTypeFlag)
+ typ := apeCmd.ParseTargetType(cmd)
+ name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag)
switch typ {
- case namespaceTarget:
- return policyengine.Namespace, nil
- case containerTarget:
- return policyengine.Container, nil
- case userTarget:
- return policyengine.User, nil
- case groupTarget:
- return policyengine.Group, nil
+ case policyengine.Namespace:
+ if name == "root" {
+ name = ""
+ }
+ return policyengine.NamespaceTarget(name)
+ case policyengine.Container:
+ var cnr cid.ID
+ commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
+ return policyengine.ContainerTarget(name)
+ case policyengine.User:
+ return policyengine.UserTarget(name)
+ case policyengine.Group:
+ return policyengine.GroupTarget(name)
+ default:
+ commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
}
- return -1, errUnknownTargetType
-}
-
-func parseChainID(cmd *cobra.Command) apechain.ID {
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
- if chainID == "" {
- commonCmd.ExitOnErr(cmd, "read chain id error: %w",
- errChainIDCannotBeEmpty)
- }
- return apechain.ID(chainID)
-}
-
-func parseChain(cmd *cobra.Command) *apechain.Chain {
- chain := new(apechain.Chain)
-
- if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
- commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules))
- } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
- commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath))
- } else {
- commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
- }
-
- chain.ID = parseChainID(cmd)
-
- cmd.Println("Parsed chain:")
- parseutil.PrintHumanReadableAPEChain(cmd, chain)
-
- return chain
-}
-
-func parseChainName(cmd *cobra.Command) apechain.Name {
- chainName, _ := cmd.Flags().GetString(chainNameFlag)
- apeChainName, ok := mChainName[strings.ToLower(chainName)]
- if !ok {
- commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
- }
- return apeChainName
+ panic("unreachable")
}
// invokerAdapter adapts invoker.Invoker to the ContractStorageInvoker interface.
@@ -115,16 +55,15 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
}
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
inv := invoker.New(c, nil)
- var ch util.Uint160
r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
- ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
+ ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
invokerAdapter := &invokerAdapter{
@@ -136,10 +75,11 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
}
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
- ac, err := helper.NewLocalActor(cmd, c)
+ walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
+ ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
var ch util.Uint160
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
index 5519705d4..23dba14f4 100644
--- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
+++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -51,7 +52,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
nmHash util.Uint160
)
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return err
}
@@ -161,9 +162,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
}
- if w.Err != nil {
- panic(w.Err)
- }
+ assert.NoError(w.Err)
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
if err != nil {
@@ -226,9 +225,7 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
for i := range accounts {
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
}
- if w.Err != nil {
- panic(w.Err)
- }
+ assert.NoError(w.Err)
res, err := c.Run(w.Bytes())
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go
index 3a7f84acb..c17fb62ff 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go
@@ -26,7 +26,7 @@ import (
const forceConfigSet = "force"
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
nbuf := make([]byte, 8)
- copy(nbuf[:], v)
+ copy(nbuf, v)
n := binary.LittleEndian.Uint64(nbuf)
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return helper.InvalidConfigValueErr(k)
}
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
default:
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
}
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
index a3b4f129a..be4041a86 100644
--- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go
+++ b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
@@ -4,7 +4,6 @@ import "time"
const (
ConsensusAccountName = "consensus"
- ProtoConfigPath = "protocol"
// MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
// of the invocation script.
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index a66438975..79685f111 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -76,7 +77,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err)
}
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -139,13 +140,12 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) {
bw.Reset()
emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
- emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
res, err := inv.Run(bw.Bytes())
if err != nil {
return nil, fmt.Errorf("can't get container info: %w", err)
}
- if len(res.Stack) != 2 {
- return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
+ if len(res.Stack) != 1 {
+ return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse)
}
cnt := new(Container)
@@ -154,19 +154,11 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
- ea := new(EACL)
- err = ea.FromStackItem(res.Stack[1])
- if err != nil {
- return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
- }
- if len(ea.Value) != 0 {
- cnt.EACL = ea
- }
return cnt, nil
}
func listContainers(cmd *cobra.Command, _ []string) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -244,9 +236,7 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
putContainer(bw, ch, cnt)
- if bw.Err != nil {
- panic(bw.Err)
- }
+ assert.NoError(bw.Err)
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err
@@ -258,10 +248,6 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) {
emit.AppCall(bw.BinWriter, ch, "put", callflag.All,
cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token)
- if ea := cnt.EACL; ea != nil {
- emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All,
- ea.Value, ea.Signature, ea.PublicKey, ea.Token)
- }
}
func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) {
@@ -322,15 +308,6 @@ type Container struct {
Signature []byte `json:"signature"`
PublicKey []byte `json:"public_key"`
Token []byte `json:"token"`
- EACL *EACL `json:"eacl"`
-}
-
-// EACL represents extended ACL struct in contract storage.
-type EACL struct {
- Value []byte `json:"value"`
- Signature []byte `json:"signature"`
- PublicKey []byte `json:"public_key"`
- Token []byte `json:"token"`
}
// ToStackItem implements stackitem.Convertible.
@@ -377,50 +354,6 @@ func (c *Container) FromStackItem(item stackitem.Item) error {
return nil
}
-// ToStackItem implements stackitem.Convertible.
-func (c *EACL) ToStackItem() (stackitem.Item, error) {
- return stackitem.NewStruct([]stackitem.Item{
- stackitem.NewByteArray(c.Value),
- stackitem.NewByteArray(c.Signature),
- stackitem.NewByteArray(c.PublicKey),
- stackitem.NewByteArray(c.Token),
- }), nil
-}
-
-// FromStackItem implements stackitem.Convertible.
-func (c *EACL) FromStackItem(item stackitem.Item) error {
- arr, ok := item.Value().([]stackitem.Item)
- if !ok || len(arr) != 4 {
- return errors.New("invalid stack item type")
- }
-
- value, err := arr[0].TryBytes()
- if err != nil {
- return errors.New("invalid eACL value")
- }
-
- sig, err := arr[1].TryBytes()
- if err != nil {
- return errors.New("invalid eACL signature")
- }
-
- pub, err := arr[2].TryBytes()
- if err != nil {
- return errors.New("invalid eACL public key")
- }
-
- tok, err := arr[3].TryBytes()
- if err != nil {
- return errors.New("invalid eACL token")
- }
-
- c.Value = value
- c.Signature = sig
- c.PublicKey = pub
- c.Token = tok
- return nil
-}
-
// getCIDFilterFunc returns filtering function for container IDs.
// Raw byte slices are used because it works with structures returned
// from contract.
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
index 5adb480da..543b5fcb3 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/cli/cmdargs"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@@ -120,9 +121,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
}
- if writer.Err != nil {
- panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
- }
+ assert.NoError(writer.Err, "can't create deployment script")
if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
return err
@@ -173,9 +172,8 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
}
- if bw.Err != nil {
- panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
- } else if bw.Len() != start {
+ assert.NoError(bw.Err, "can't create deployment script")
+ if bw.Len() != start {
writer.WriteBytes(bw.Bytes())
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
index be2134b77..fde58fd2b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
@@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -36,7 +37,7 @@ type contractDumpInfo struct {
}
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -219,8 +220,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
- _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
- info.name, info.version, info.hash.StringLE())))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
+ info.name, info.version, info.hash.StringLE()))
}
_ = tw.Flush()
@@ -236,21 +237,17 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
- if sub.Err != nil {
- panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
- }
+ assert.NoError(sub.Err, "can't create version script")
script := sub.Bytes()
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
- bw.BinWriter.WriteBytes(script)
+ bw.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
- if bw.Err != nil {
- panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
- }
+ assert.NoError(bw.Err, "can't create version script")
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
new file mode 100644
index 000000000..4046e85e3
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
@@ -0,0 +1,83 @@
+package frostfsid
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+var (
+ frostfsidAddSubjectKeyCmd = &cobra.Command{
+ Use: "add-subject-key",
+ Short: "Add a public key to the subject in frostfsid contract",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ },
+ Run: frostfsidAddSubjectKey,
+ }
+ frostfsidRemoveSubjectKeyCmd = &cobra.Command{
+ Use: "remove-subject-key",
+ Short: "Remove a public key from the subject in frostfsid contract",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ },
+ Run: frostfsidRemoveSubjectKey,
+ }
+)
+
+func initFrostfsIDAddSubjectKeyCmd() {
+ Cmd.AddCommand(frostfsidAddSubjectKeyCmd)
+
+ ff := frostfsidAddSubjectKeyCmd.Flags()
+ ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+
+ ff.String(subjectAddressFlag, "", "Subject address")
+ _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
+
+ ff.String(subjectKeyFlag, "", "Public key to add")
+ _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
+}
+
+func initFrostfsIDRemoveSubjectKeyCmd() {
+ Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd)
+
+ ff := frostfsidRemoveSubjectKeyCmd.Flags()
+ ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+
+ ff.String(subjectAddressFlag, "", "Subject address")
+ _ = frostfsidRemoveSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
+
+ ff.String(subjectKeyFlag, "", "Public key to remove")
+ _ = frostfsidRemoveSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
+}
+
+func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) {
+ addr := getFrostfsIDSubjectAddress(cmd)
+ pub := getFrostfsIDSubjectKey(cmd)
+
+ ffsid, err := newFrostfsIDClient(cmd)
+ commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
+
+ ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub))
+
+ err = ffsid.sendWait()
+ commonCmd.ExitOnErr(cmd, "add subject key: %w", err)
+}
+
+func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) {
+ addr := getFrostfsIDSubjectAddress(cmd)
+ pub := getFrostfsIDSubjectKey(cmd)
+
+ ffsid, err := newFrostfsIDClient(cmd)
+ commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
+
+ ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub))
+
+ err = ffsid.sendWait()
+ commonCmd.ExitOnErr(cmd, "remove subject key: %w", err)
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
index 091d6634a..7f777db98 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
@@ -1,6 +1,7 @@
package frostfsid
import (
+ "encoding/hex"
"errors"
"fmt"
"math/big"
@@ -34,11 +35,16 @@ const (
subjectNameFlag = "subject-name"
subjectKeyFlag = "subject-key"
subjectAddressFlag = "subject-address"
- includeNamesFlag = "include-names"
+ extendedFlag = "extended"
groupNameFlag = "group-name"
groupIDFlag = "group-id"
rootNamespacePlaceholder = ""
+
+ keyFlag = "key"
+ keyDescFlag = "Key for storing a value in the subject's KV storage"
+ valueFlag = "value"
+ valueDescFlag = "Value to be stored in the subject's KV storage"
)
var (
@@ -61,7 +67,6 @@ var (
Use: "list-namespaces",
Short: "List all namespaces in frostfsid",
PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListNamespaces,
@@ -91,7 +96,6 @@ var (
Use: "list-subjects",
Short: "List subjects in namespace",
PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListSubjects,
@@ -121,7 +125,6 @@ var (
Use: "list-groups",
Short: "List groups in namespace",
PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListGroups,
@@ -151,11 +154,27 @@ var (
Use: "list-group-subjects",
Short: "List subjects in group",
PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListGroupSubjects,
}
+
+ frostfsidSetKVCmd = &cobra.Command{
+ Use: "set-kv",
+ Short: "Store a key-value pair in the subject's KV storage",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ },
+ Run: frostfsidSetKV,
+ }
+ frostfsidDeleteKVCmd = &cobra.Command{
+ Use: "delete-kv",
+ Short: "Delete a value from the subject's KV storage",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ },
+ Run: frostfsidDeleteKV,
+ }
)
func initFrostfsIDCreateNamespaceCmd() {
@@ -169,7 +188,6 @@ func initFrostfsIDCreateNamespaceCmd() {
func initFrostfsIDListNamespacesCmd() {
Cmd.AddCommand(frostfsidListNamespacesCmd)
frostfsidListNamespacesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidListNamespacesCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initFrostfsIDCreateSubjectCmd() {
@@ -192,8 +210,7 @@ func initFrostfsIDListSubjectsCmd() {
Cmd.AddCommand(frostfsidListSubjectsCmd)
frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
- frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
- frostfsidListSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether to include subject info (requires additional requests)")
}
func initFrostfsIDCreateGroupCmd() {
@@ -217,7 +234,6 @@ func initFrostfsIDListGroupsCmd() {
Cmd.AddCommand(frostfsidListGroupsCmd)
frostfsidListGroupsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListGroupsCmd.Flags().String(namespaceFlag, "", "Namespace to list groups")
- frostfsidListGroupsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initFrostfsIDAddSubjectToGroupCmd() {
@@ -241,8 +257,22 @@ func initFrostfsIDListGroupSubjectsCmd() {
frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
- frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
- frostfsidListGroupSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether to include subject info (requires additional requests)")
+}
+
+func initFrostfsIDSetKVCmd() {
+ Cmd.AddCommand(frostfsidSetKVCmd)
+ frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
+ frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
+ frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
+}
+
+func initFrostfsIDDeleteKVCmd() {
+ Cmd.AddCommand(frostfsidDeleteKVCmd)
+ frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
+ frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
}
func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
@@ -262,7 +292,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaces()
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
- items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
+ items, err := readIterator(inv, &it, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
namespaces, err := frostfsidclient.ParseNamespaces(items)
@@ -307,34 +337,32 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
}
func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
- includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
+ extended, _ := cmd.Flags().GetBool(extendedFlag)
ns := getFrostfsIDNamespace(cmd)
inv, _, hash := initInvoker(cmd)
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
- subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
+ subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
for _, addr := range subAddresses {
- if !includeNames {
+ if !extended {
cmd.Println(address.Uint160ToString(addr))
continue
}
- sessionID, it, err := reader.ListSubjects()
+ items, err := reader.GetSubject(addr)
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
- items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
- commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
-
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
- cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
+ printSubjectInfo(cmd, addr, subj)
+ cmd.Println()
}
}
@@ -374,7 +402,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
- items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
+ items, err := readIterator(inv, &it, sessionID)
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
groups, err := frostfsidclient.ParseGroups(items)
commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@@ -412,10 +440,49 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
}
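+// frostfsidSetKV stores a key-value pair in the subject's KV storage; the key
+// must be non-empty.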
+func frostfsidSetKV(cmd *cobra.Command, _ []string) {
+ subjectAddress := getFrostfsIDSubjectAddress(cmd)
+ key, _ := cmd.Flags().GetString(keyFlag)
+ value, _ := cmd.Flags().GetString(valueFlag)
+
+ if key == "" {
+ commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
+ }
+
+ ffsid, err := newFrostfsIDClient(cmd)
+ commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
+
+ method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)
+
+ ffsid.addCall(method, args)
+
+ err = ffsid.sendWait()
+ commonCmd.ExitOnErr(cmd, "set KV: %w", err)
+}
+
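+// frostfsidDeleteKV removes a key from the subject's KV storage; the key must
+// be non-empty.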
+func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
+ subjectAddress := getFrostfsIDSubjectAddress(cmd)
+ key, _ := cmd.Flags().GetString(keyFlag)
+
+ if key == "" {
+ commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
+ }
+
+ ffsid, err := newFrostfsIDClient(cmd)
+ commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
+
+ method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)
+
+ ffsid.addCall(method, args)
+
+ err = ffsid.sendWait()
+ commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
+}
+
func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd)
groupID := getFrostfsIDGroupID(cmd)
- includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
+ extended, _ := cmd.Flags().GetBool(extendedFlag)
inv, cs, hash := initInvoker(cmd)
_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
@@ -424,7 +491,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
- items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
+ items, err := readIterator(inv, &it, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@@ -433,7 +500,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })
for _, subjAddr := range subjects {
- if !includeNames {
+ if !extended {
cmd.Println(address.Uint160ToString(subjAddr))
continue
}
@@ -442,7 +509,8 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
- cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
+ printSubjectInfo(cmd, subjAddr, subj)
+ cmd.Println()
}
}
@@ -497,32 +565,28 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
}
f.bw.Reset()
- if len(f.wCtx.SentTxs) == 0 {
- return nil, errors.New("no transactions to wait")
- }
-
f.wCtx.Command.Println("Waiting for transactions to persist...")
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
}
-func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
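+// readIterator drains the iterator in iteratorBatchSize chunks until a short
+// batch signals the end of the session.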
+func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
var shouldStop bool
res := make([]stackitem.Item, 0)
for !shouldStop {
- items, err := inv.TraverseIterator(sessionID, iter, batchSize)
+ items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
if err != nil {
return nil, err
}
res = append(res, items...)
- shouldStop = len(items) < batchSize
+ shouldStop = len(items) < iteratorBatchSize
}
return res, nil
}
func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)
@@ -536,3 +600,30 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui
return inv, cs, nmHash
}
+
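+// printSubjectInfo prints the subject's address, primary and additional keys,
+// name, namespace and KV storage in a human-readable form.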
+func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
+ cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
+ pk := ""
+ if subj.PrimaryKey != nil {
+ pk = hex.EncodeToString(subj.PrimaryKey.Bytes())
+ }
+ cmd.Printf("Primary key: %s\n", pk)
+ cmd.Printf("Name: %s\n", subj.Name)
+ cmd.Printf("Namespace: %s\n", subj.Namespace)
+ if len(subj.AdditionalKeys) > 0 {
+ cmd.Printf("Additional keys:\n")
+ for _, key := range subj.AdditionalKeys {
+ k := ""
+ if key != nil {
+ k = hex.EncodeToString(key.Bytes())
+ }
+ cmd.Printf("- %s\n", k)
+ }
+ }
+ if len(subj.KV) > 0 {
+ cmd.Printf("KV:\n")
+ for k, v := range subj.KV {
+ cmd.Printf("- %s: %s\n", k, v)
+ }
+ }
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
index cce859d2f..1d0bc8441 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
@@ -1,59 +1,12 @@
package frostfsid
import (
- "encoding/hex"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/spf13/viper"
"github.com/stretchr/testify/require"
)
-func TestFrostfsIDConfig(t *testing.T) {
- pks := make([]*keys.PrivateKey, 4)
- for i := range pks {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- pks[i] = pk
- }
-
- fmts := []string{
- pks[0].GetScriptHash().StringLE(),
- address.Uint160ToString(pks[1].GetScriptHash()),
- hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
- hex.EncodeToString(pks[3].PublicKey().Bytes()),
- }
-
- for i := range fmts {
- v := viper.New()
- v.Set("frostfsid.admin", fmts[i])
-
- actual, found, err := helper.GetFrostfsIDAdmin(v)
- require.NoError(t, err)
- require.True(t, found)
- require.Equal(t, pks[i].GetScriptHash(), actual)
- }
-
- t.Run("bad key", func(t *testing.T) {
- v := viper.New()
- v.Set("frostfsid.admin", "abc")
-
- _, found, err := helper.GetFrostfsIDAdmin(v)
- require.Error(t, err)
- require.True(t, found)
- })
- t.Run("missing key", func(t *testing.T) {
- v := viper.New()
-
- _, found, err := helper.GetFrostfsIDAdmin(v)
- require.NoError(t, err)
- require.False(t, found)
- })
-}
-
func TestNamespaceRegexp(t *testing.T) {
for _, tc := range []struct {
name string
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
index 850474794..8aad5c5c1 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
@@ -12,4 +12,8 @@ func init() {
initFrostfsIDAddSubjectToGroupCmd()
initFrostfsIDRemoveSubjectFromGroupCmd()
initFrostfsIDListGroupSubjectsCmd()
+ initFrostfsIDSetKVCmd()
+ initFrostfsIDDeleteKVCmd()
+ initFrostfsIDAddSubjectKeyCmd()
+ initFrostfsIDRemoveSubjectKeyCmd()
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
index 7af776797..78f8617f1 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
@@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
@@ -141,60 +140,29 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
}
func generateStorageCreds(cmd *cobra.Command, _ []string) error {
- return refillGas(cmd, storageGasConfigFlag, true)
-}
-
-func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
- // storage wallet path is not part of the config
- storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
- // wallet address is not part of the config
- walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
-
- var gasReceiver util.Uint160
-
- if len(walletAddress) != 0 {
- gasReceiver, err = address.StringToUint160(walletAddress)
- if err != nil {
- return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
- }
- } else {
- if storageWalletPath == "" {
- return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag)
- }
-
- var w *wallet.Wallet
-
- if createWallet {
- w, err = wallet.NewWallet(storageWalletPath)
- } else {
- w, err = wallet.NewWalletFromFile(storageWalletPath)
- }
-
- if err != nil {
- return fmt.Errorf("can't create wallet: %w", err)
- }
-
- if createWallet {
- var password string
-
- label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
- password, err := config.GetStoragePassword(viper.GetViper(), label)
- if err != nil {
- return fmt.Errorf("can't fetch password: %w", err)
- }
-
- if label == "" {
- label = constants.SingleAccountName
- }
-
- if err := w.CreateAccount(label, password); err != nil {
- return fmt.Errorf("can't create account: %w", err)
- }
- }
-
- gasReceiver = w.Accounts[0].Contract.ScriptHash()
+ walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
+ w, err := wallet.NewWallet(walletPath)
+ if err != nil {
+ return fmt.Errorf("create wallet: %w", err)
}
+ label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
+ password, err := config.GetStoragePassword(viper.GetViper(), label)
+ if err != nil {
+ return fmt.Errorf("can't fetch password: %w", err)
+ }
+
+ if label == "" {
+ label = constants.SingleAccountName
+ }
+
+ if err := w.CreateAccount(label, password); err != nil {
+ return fmt.Errorf("can't create account: %w", err)
+ }
+ return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
+}
+
+func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
gasStr := viper.GetString(gasFlag)
gasAmount, err := helper.ParseGASAmount(gasStr)
@@ -208,9 +176,11 @@ func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error
}
bw := io.NewBufBinWriter()
- emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
- wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
- emit.Opcodes(bw.BinWriter, opcode.ASSERT)
+ for _, gasReceiver := range gasReceivers {
+ emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
+ wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
+ emit.Opcodes(bw.BinWriter, opcode.ASSERT)
+ }
if bw.Err != nil {
return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
}
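
A note on the reworked refillGas: all receivers are folded into one committee transaction, and every transfer is followed by ASSERT, so either each receiver is funded or the whole transaction aborts. A minimal caller sketch (receiver hashes are placeholders):

    receivers := []util.Uint160{recvA, recvB} // recvA/recvB: illustrative script hashes
    // one transaction, one checked GAS transfer per receiver
    if err := refillGas(cmd, commonflags.RefillGasAmountFlag, receivers...); err != nil {
        return err
    }
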
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
index 1dd6420eb..15af5637b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
@@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) {
buf.Reset()
v.Set(commonflags.AlphabetWalletsFlag, walletDir)
require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
- for i := uint64(0); i < size; i++ {
+ for i := range uint64(size) {
buf.WriteString(strconv.FormatUint(i, 10) + "\r")
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go
index 3633d9a8e..73c986713 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/root.go
@@ -1,7 +1,12 @@
package generate
import (
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -33,7 +38,27 @@ var (
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
},
RunE: func(cmd *cobra.Command, _ []string) error {
- return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
+ storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
+ walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
+
+ var gasReceivers []util.Uint160
+ for _, walletAddress := range walletAddresses {
+ addr, err := address.StringToUint160(walletAddress)
+ if err != nil {
+ return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
+ }
+
+ gasReceivers = append(gasReceivers, addr)
+ }
+ for _, storageWalletPath := range storageWalletPaths {
+ w, err := wallet.NewWalletFromFile(storageWalletPath)
+ if err != nil {
+ return fmt.Errorf("can't create wallet: %w", err)
+ }
+
+ gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
+ }
+ return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
},
}
GenerateAlphabetCmd = &cobra.Command{
@@ -50,10 +75,10 @@ var (
func initRefillGasCmd() {
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
- RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
+ RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
+ RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
- RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
+ RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
}
func initGenerateStorageCmd() {
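
With the flags switched to StringArray and MarkFlagsOneRequired, one refill-gas invocation can mix several receivers; the flag spellings below are assumed from the commonflags constants and may differ:

    frostfs-adm morph refill-gas \
        --storage-wallet ./node1.json \
        --storage-wallet ./node2.json \
        --wallet-address <neo-address> \
        --gas 10.0
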
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
index 1ca246f9f..6499ace5f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
@@ -3,10 +3,6 @@ package helper
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -17,7 +13,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -29,44 +24,88 @@ type LocalActor struct {
rpcInvoker invoker.RPCInvoke
}
+type AlphabetWallets struct {
+ Label string
+ Path string
+}
+
+func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
+ w, err := GetAlphabetWallets(v, a.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ var accounts []*wallet.Account
+ for _, wall := range w {
+ acc, err := GetWalletAccount(wall, a.Label)
+ if err != nil {
+ return nil, err
+ }
+ accounts = append(accounts, acc)
+ }
+ return accounts, nil
+}
+
+type RegularWallets struct{ Path string }
+
+func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
+ w, err := getRegularWallet(r.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
+}
+
// NewLocalActor creates a LocalActor with accounts from the provided wallets.
// If no wallets are provided, the actor is created with a dummy account suitable only for read operations.
-func NewLocalActor(cmd *cobra.Command, c actor.RPCActor) (*LocalActor, error) {
- walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
+//
+// If wallets are provided, the contract client will use accounts with the expected account name from these wallets.
+// To determine which account name should be used in a contract client, refer to how the contract
+// verifies the transaction signature.
+func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) {
var act *actor.Actor
var accounts []*wallet.Account
- if walletDir == "" {
- account, err := wallet.NewAccount()
- commonCmd.ExitOnErr(cmd, "unable to create dummy account: %w", err)
- act, err = actor.New(c, []actor.SignerAccount{{
- Signer: transaction.Signer{
- Account: account.Contract.ScriptHash(),
- Scopes: transaction.Global,
- },
- Account: account,
- }})
- if err != nil {
- return nil, err
- }
- } else {
- wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
- commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
+ var signers []actor.SignerAccount
- for _, w := range wallets {
- acc, err := GetWalletAccount(w, constants.CommitteeAccountName)
- commonCmd.ExitOnErr(cmd, "can't find committee account: %w", err)
- accounts = append(accounts, acc)
- }
- act, err = actor.New(c, []actor.SignerAccount{{
- Signer: transaction.Signer{
- Account: accounts[0].Contract.ScriptHash(),
- Scopes: transaction.Global,
- },
- Account: accounts[0],
- }})
+ if alphabet != nil {
+ account, err := alphabet.GetAccount(viper.GetViper())
if err != nil {
return nil, err
}
+
+ accounts = append(accounts, account...)
+ signers = append(signers, actor.SignerAccount{
+ Signer: transaction.Signer{
+ Account: account[0].Contract.ScriptHash(),
+ Scopes: transaction.Global,
+ },
+ Account: account[0],
+ })
+ }
+
+ for _, w := range regularWallets {
+ if w == nil {
+ continue
+ }
+ account, err := w.GetAccount()
+ if err != nil {
+ return nil, err
+ }
+
+ accounts = append(accounts, account...)
+ signers = append(signers, actor.SignerAccount{
+ Signer: transaction.Signer{
+ Account: account[0].Contract.ScriptHash(),
+ Scopes: transaction.Global,
+ },
+ Account: account[0],
+ })
+ }
+
+ act, err := actor.New(c, signers)
+ if err != nil {
+ return nil, err
}
return &LocalActor{
neoActor: act,
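
A short sketch of the new wiring (paths and the RPC client are illustrative; the account label is whatever the target contract's signature check expects, e.g. the committee account name used by the replaced code):

    alphabet := &AlphabetWallets{
        Path:  "/path/to/alphabet-wallets", // assumed location
        Label: constants.CommitteeAccountName,
    }
    user := &RegularWallets{Path: "/path/to/user.json"} // optional extra signer
    act, err := NewLocalActor(rpcActor, alphabet, user) // rpcActor: any actor.RPCActor
    if err != nil {
        return err
    }
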
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
index 2011301d1..64d1c6393 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
@@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
}
if method != constants.UpdateMethodName || err == nil && !found {
- h, found, err = GetFrostfsIDAdmin(viper.GetViper())
+ h, found, err = getFrostfsIDAdmin(viper.GetViper())
}
if err != nil {
return nil, err
@@ -166,5 +166,6 @@ func DeployNNS(c *InitializeContext, method string) error {
return fmt.Errorf("can't send deploy transaction: %w", err)
}
+ c.Command.Println("NNS hash:", invokeHash.StringLE())
return c.AwaitTx()
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
index f29042b82..fce2dfb74 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
@@ -11,7 +11,7 @@ import (
const frostfsIDAdminConfigKey = "frostfsid.admin"
-func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
+func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
admin := v.GetString(frostfsIDAdminConfigKey)
if admin == "" {
return util.Uint160{}, false, nil
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go
new file mode 100644
index 000000000..38991e962
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go
@@ -0,0 +1,53 @@
+package helper
+
+import (
+ "encoding/hex"
+ "testing"
+
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/spf13/viper"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFrostfsIDConfig(t *testing.T) {
+ pks := make([]*keys.PrivateKey, 4)
+ for i := range pks {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ pks[i] = pk
+ }
+
+ fmts := []string{
+ pks[0].GetScriptHash().StringLE(),
+ address.Uint160ToString(pks[1].GetScriptHash()),
+ hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
+ hex.EncodeToString(pks[3].PublicKey().Bytes()),
+ }
+
+ for i := range fmts {
+ v := viper.New()
+ v.Set("frostfsid.admin", fmts[i])
+
+ actual, found, err := getFrostfsIDAdmin(v)
+ require.NoError(t, err)
+ require.True(t, found)
+ require.Equal(t, pks[i].GetScriptHash(), actual)
+ }
+
+ t.Run("bad key", func(t *testing.T) {
+ v := viper.New()
+ v.Set("frostfsid.admin", "abc")
+
+ _, found, err := getFrostfsIDAdmin(v)
+ require.Error(t, err)
+ require.True(t, found)
+ })
+ t.Run("missing key", func(t *testing.T) {
+ v := viper.New()
+
+ _, found, err := getFrostfsIDAdmin(v)
+ require.NoError(t, err)
+ require.False(t, found)
+ })
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
index 961ceba53..50b5c1ec7 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
@@ -6,6 +6,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -13,9 +14,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -187,19 +186,9 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
}
func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
- switch c.(type) {
- case *rpcclient.Client:
- inv := invoker.New(c, nil)
- reader := nns2.NewReader(inv, nnsHash)
- return reader.IsAvailable(name)
- default:
- b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
- if err != nil {
- return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
- }
-
- return b, nil
- }
+ inv := invoker.New(c, nil)
+ reader := nns2.NewReader(inv, nnsHash)
+ return reader.IsAvailable(name)
}
func CheckNotaryEnabled(c Client) error {
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
index b5b6adf05..da5ffedae 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
@@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -21,6 +22,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
@@ -28,7 +30,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -134,12 +135,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex
return nil, err
}
- accounts, err := createWalletAccounts(wallets)
+ accounts, err := getSingleAccounts(wallets)
if err != nil {
return nil, err
}
- cliCtx, err := DefaultClientContext(c, committeeAcc)
+ cliCtx, err := defaultClientContext(c, committeeAcc)
if err != nil {
return nil, fmt.Errorf("client context: %w", err)
}
@@ -191,7 +192,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet)
}
c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
} else {
- c, err = GetN3Client(v)
+ c, err = NewRemoteClient(v)
}
if err != nil {
return nil, fmt.Errorf("can't create N3 client: %w", err)
@@ -211,7 +212,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
return ctrPath, nil
}
-func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
+func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
accounts := make([]*wallet.Account, len(wallets))
for i, w := range wallets {
acc, err := GetWalletAccount(w, constants.SingleAccountName)
@@ -375,9 +376,7 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen
}
act, err = actor.New(c.Client, signers)
} else {
- if withConsensus {
- panic("BUG: should never happen")
- }
+ assert.False(withConsensus, "BUG: should never happen")
act, err = c.CommitteeAct, nil
}
if err != nil {
@@ -411,11 +410,9 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp
func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error {
version, err := c.Client.GetVersion()
- if err != nil {
- // error appears only if client
- // has not been initialized
- panic(err)
- }
+ // error appears only if client
+ // has not been initialized
+ assert.NoError(err)
network := version.Protocol.Network
// Use parameter context to avoid dealing with signature order.
@@ -447,12 +444,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin
for i := range tx.Signers {
if tx.Signers[i].Account == h {
+ assert.True(i <= len(tx.Scripts), "BUG: invalid signing order")
if i < len(tx.Scripts) {
tx.Scripts[i] = *w
- } else if i == len(tx.Scripts) {
+ }
+ if i == len(tx.Scripts) {
tx.Scripts = append(tx.Scripts, *w)
- } else {
- panic("BUG: invalid signing order")
}
return nil
}
@@ -510,9 +507,7 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
- if bw.Err != nil {
- panic(bw.Err)
- }
+ assert.NoError(bw.Err)
return bw.Bytes(), false, nil
}
@@ -524,12 +519,8 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
}
func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
- res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
- if err != nil {
- return false, err
- }
-
- return res.State == vmstate.Halt.String(), nil
+ avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone))
+ return !avail, err
}
func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool {
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
index 375fa84d7..46611c177 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
@@ -8,7 +8,9 @@ import (
"sort"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/config"
"github.com/nspcc-dev/neo-go/pkg/core"
@@ -47,7 +49,7 @@ type LocalClient struct {
}
func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) {
- cfg, err := config.LoadFile(v.GetString(constants.ProtoConfigPath))
+ cfg, err := config.LoadFile(v.GetString(commonflags.ProtoConfigPath))
if err != nil {
return nil, err
}
@@ -57,17 +59,59 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
return nil, err
}
- m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
- accounts := make([]*wallet.Account, len(wallets))
- for i := range accounts {
- accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName)
- if err != nil {
- return nil, err
+ go bc.Run()
+
+ accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets)
+ if err != nil {
+ return nil, err
+ }
+
+ if cmd.Name() != "init" {
+ if err := restoreDump(bc, dumpPath); err != nil {
+ return nil, fmt.Errorf("restore dump: %w", err)
}
}
+ return &LocalClient{
+ bc: bc,
+ dumpPath: dumpPath,
+ accounts: accounts,
+ }, nil
+}
+
+func restoreDump(bc *core.Blockchain, dumpPath string) error {
+ f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
+ if err != nil {
+ return fmt.Errorf("can't open local dump: %w", err)
+ }
+ defer f.Close()
+
+ r := io.NewBinReaderFromIO(f)
+
+ var skip uint32
+ if bc.BlockHeight() != 0 {
+ skip = bc.BlockHeight() + 1
+ }
+
+ count := r.ReadU32LE() - skip
+ if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
+ accounts := make([]*wallet.Account, len(wallets))
+ for i := range accounts {
+ acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
+ if err != nil {
+ return nil, err
+ }
+ accounts[i] = acc
+ }
+
indexMap := make(map[string]int)
- for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
+ for i, pub := range cfg.StandbyCommittee {
indexMap[pub] = i
}
@@ -76,37 +120,12 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
pj := accounts[j].PrivateKey().PublicKey().Bytes()
return indexMap[string(pi)] < indexMap[string(pj)]
})
- sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
+ sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
})
- go bc.Run()
-
- if cmd.Name() != "init" {
- f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
- if err != nil {
- return nil, fmt.Errorf("can't open local dump: %w", err)
- }
- defer f.Close()
-
- r := io.NewBinReaderFromIO(f)
-
- var skip uint32
- if bc.BlockHeight() != 0 {
- skip = bc.BlockHeight() + 1
- }
-
- count := r.ReadU32LE() - skip
- if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
- return nil, fmt.Errorf("can't restore local dump: %w", err)
- }
- }
-
- return &LocalClient{
- bc: bc,
- dumpPath: dumpPath,
- accounts: accounts[:m],
- }, nil
+ m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
+ return accounts[:m], nil
}
func (l *LocalClient) GetBlockCount() (uint32, error) {
@@ -127,11 +146,6 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
return &a, nil
}
-func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) {
- // not used by `morph init` command
- panic("unexpected call")
-}
-
// InvokeFunction is implemented via `InvokeScript`.
func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
var err error
@@ -295,13 +309,7 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer)
}
func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
- // We need to test that transaction was formed correctly to catch as many errors as we can.
- bs := tx.Bytes()
- _, err := transaction.NewTransactionFromBytes(bs)
- if err != nil {
- return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
- }
-
+ tx = tx.Copy()
l.transactions = append(l.transactions, tx)
return tx.Hash(), nil
}
@@ -309,9 +317,7 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint
func (l *LocalClient) putTransactions() error {
// 1. Prepare new block.
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
- if err != nil {
- panic(err)
- }
+ assert.NoError(err)
defer func() { l.transactions = l.transactions[:0] }()
b := &block.Block{
@@ -352,9 +358,7 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s
w := io.NewBufBinWriter()
emit.Array(w.BinWriter, parameters...)
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
- if w.Err != nil {
- panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
- }
+ assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
return c.InvokeScript(w.Bytes(), signers)
}
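
The SendRawTransaction change in this file trades the serialize/re-parse check for a deep copy: the queued transaction no longer aliases the caller's value. A sketch of the hazard this closes (illustrative):

    h, _ := l.SendRawTransaction(tx)
    tx.Scripts = nil // mutating the caller's tx no longer affects the queued copy
    _ = h
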
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
index e62a21b3f..3f3a66cb6 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
@@ -25,15 +24,10 @@ import (
// Client represents N3 client interface capable of test-invoking scripts
// and sending signed transactions to chain.
type Client interface {
- invoker.RPCInvoke
+ actor.RPCActor
- GetBlockCount() (uint32, error)
GetNativeContracts() ([]state.Contract, error)
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
- GetVersion() (*result.Version, error)
- SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
- GetCommittee() (keys.PublicKeys, error)
- CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
}
type HashVUBPair struct {
@@ -48,7 +42,7 @@ type ClientContext struct {
SentTxs []HashVUBPair
}
-func GetN3Client(v *viper.Viper) (Client, error) {
+func NewRemoteClient(v *viper.Viper) (Client, error) {
// number of opened connections
// by neo-go client per one host
const (
@@ -88,8 +82,14 @@ func GetN3Client(v *viper.Viper) (Client, error) {
return c, nil
}
-func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
- commAct, err := NewActor(c, committeeAcc)
+func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
+ commAct, err := actor.New(c, []actor.SignerAccount{{
+ Signer: transaction.Signer{
+ Account: committeeAcc.Contract.ScriptHash(),
+ Scopes: transaction.Global,
+ },
+ Account: committeeAcc,
+ }})
if err != nil {
return nil, err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
index 7a778f8c3..20abaff0a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
@@ -3,6 +3,7 @@ package helper
import (
"errors"
"fmt"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -72,13 +73,17 @@ func InvalidConfigValueErr(key string) error {
return fmt.Errorf("invalid %s config value from netmap contract", key)
}
-func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160) error {
+func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error {
+ if countEpoch <= 0 {
+ return errors.New("number of epochs cannot be less than 1")
+ }
+
curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch"))
if err != nil {
return errors.New("can't fetch current epoch from the netmap contract")
}
- newEpoch := curr + 1
+ newEpoch := curr + countEpoch
wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch)
// In NeoFS this is done via Notary contract. Here, however, we can form the
@@ -114,11 +119,8 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
return err
}
for k, v := range m {
- for _, key := range NetmapConfigKeys {
- if k == key {
- md[k] = v
- break
- }
+ if slices.Contains(NetmapConfigKeys, k) {
+ md[k] = v
}
}
return nil
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
index 8c6b90539..be6b2c6dd 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
@@ -14,16 +14,36 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
+ "github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper"
)
+func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
+ w, err := wallet.NewWalletFromFile(walletPath)
+ if err != nil {
+ return nil, err
+ }
+
+ password, err := input.ReadPassword("Enter password for wallet:")
+ if err != nil {
+ return nil, fmt.Errorf("can't fetch password: %w", err)
+ }
+
+ for i := range w.Accounts {
+ if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
+ err = fmt.Errorf("can't unlock wallet: %w", err)
+ break
+ }
+ }
+
+ return w, err
+}
+
func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
wallets, err := openAlphabetWallets(v, walletDir)
if err != nil {
@@ -53,7 +73,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
if errors.Is(err, os.ErrNotExist) {
err = nil
} else {
- err = fmt.Errorf("can't open wallet: %w", err)
+ err = fmt.Errorf("can't open alphabet wallet: %w", err)
}
break
}
@@ -87,16 +107,6 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
return wallets, nil
}
-func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
- return actor.New(c, []actor.SignerAccount{{
- Signer: transaction.Signer{
- Account: committeeAcc.Contract.ScriptHash(),
- Scopes: transaction.Global,
- },
- Account: committeeAcc,
- }})
-}
-
func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
index e127ca545..176356378 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -111,9 +112,7 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b
emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
- if w.Err != nil {
- panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
- }
+ assert.NoError(w.Err, "can't wrap register script")
}
func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
index 4c6607f9a..7b7597d91 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
@@ -1,21 +1,18 @@
package initialize
import (
- "errors"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -30,7 +27,8 @@ const (
)
func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
- regPrice, err := getCandidateRegisterPrice(c)
+ reader := neo.NewReader(c.ReadOnlyInvoker)
+ regPrice, err := reader.GetRegisterPrice()
if err != nil {
return fmt.Errorf("can't fetch registration price: %w", err)
}
@@ -42,9 +40,7 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
emit.Opcodes(w.BinWriter, opcode.ASSERT)
}
emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
- if w.Err != nil {
- panic(fmt.Sprintf("BUG: %v", w.Err))
- }
+ assert.NoError(w.Err)
signers := []actor.SignerAccount{{
Signer: c.GetSigner(false, c.CommitteeAcc),
@@ -116,7 +112,7 @@ func registerCandidates(c *helper.InitializeContext) error {
func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
neoHash := neo.Hash
- ok, err := transferNEOFinished(c, neoHash)
+ ok, err := transferNEOFinished(c)
if ok || err != nil {
return err
}
@@ -139,33 +135,8 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
return c.AwaitTx()
}
-func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
- r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
+func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
+ r := neo.NewReader(c.ReadOnlyInvoker)
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
}
-
-var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
-
-func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
- switch c.Client.(type) {
- case *rpcclient.Client:
- inv := invoker.New(c.Client, nil)
- reader := neo.NewReader(inv)
- return reader.GetRegisterPrice()
- default:
- neoHash := neo.Hash
- res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
- if err != nil {
- return 0, err
- }
- if len(res.Stack) == 0 {
- return 0, errGetPriceInvalid
- }
- bi, err := res.Stack[0].TryInteger()
- if err != nil || !bi.IsInt64() {
- return 0, errGetPriceInvalid
- }
- return bi.Int64(), nil
- }
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
index a6815ee13..05bc83a8b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
@@ -1,6 +1,8 @@
package initialize
import (
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -29,10 +31,14 @@ func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error {
callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs)
if err := c.SendCommitteeTx(w.Bytes(), false); err != nil {
- return err
+ return fmt.Errorf("send committee transaction: %w", err)
}
- return c.AwaitTx()
+ err := c.AwaitTx()
+ if err != nil {
+ err = fmt.Errorf("await committee transaction: %w", err)
+ }
+ return err
}
func setRolesFinished(c *helper.InitializeContext) (bool, error) {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
index 74f5d3e88..9bc51c055 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
@@ -62,7 +62,7 @@ func testInitialize(t *testing.T, committeeSize int) {
v := viper.GetViper()
require.NoError(t, generateTestData(testdataDir, committeeSize))
- v.Set(constants.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
+ v.Set(commonflags.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
// Set to the path or remove the next statement to download from the network.
require.NoError(t, Cmd.Flags().Set(commonflags.ContractsInitFlag, contractsPath))
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
index b7102fa13..bb684b3a9 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
@@ -3,6 +3,7 @@ package initialize
import (
"fmt"
"math/big"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -21,17 +22,16 @@ import (
)
const (
- gasInitialTotalSupply = 30000000 * native.GASFactor
// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor
- // alphabetGasRatio is a coefficient that defines the threshold below which
- // the balance of the alphabet node is considered not replenished. The value
- // of this coefficient is determined empirically.
- alphabetGasRatio = 5
)
+func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
+ return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
+}
+
func transferFunds(c *helper.InitializeContext) error {
ok, err := transferFundsFinished(c)
if ok || err != nil {
@@ -41,6 +41,11 @@ func transferFunds(c *helper.InitializeContext) error {
return err
}
+ version, err := c.Client.GetVersion()
+ if err != nil {
+ return err
+ }
+
var transfers []transferTarget
for _, acc := range c.Accounts {
to := acc.Contract.ScriptHash()
@@ -58,7 +63,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{
Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(),
- Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2,
+ Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
},
transferTarget{
Token: neo.Hash,
@@ -79,12 +84,26 @@ func transferFunds(c *helper.InitializeContext) error {
return c.AwaitTx()
}
+// transferFundsFinished checks balances of accounts we transfer GAS to.
+// The stage is considered finished if the balance is greater than half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
- acc := c.Accounts[0]
-
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
- res, err := r.BalanceOf(acc.Contract.ScriptHash())
- return res.Cmp(big.NewInt(alphabetGasRatio*native.GASFactor)) == 1, err
+ res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
+ if err != nil {
+ return false, err
+ }
+
+ version, err := c.Client.GetVersion()
+ if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
+ return false, err
+ }
+
+ res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
+ if err != nil {
+ return false, err
+ }
+
+ return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
}
func transferGASToProxy(c *helper.InitializeContext) error {
@@ -144,5 +163,17 @@ func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients
if err != nil {
return nil, fmt.Errorf("can't create actor: %w", err)
}
- return act.MakeRun(w.Bytes())
+ tx, err := act.MakeRun(w.Bytes())
+ if err != nil {
+ sum := make(map[util.Uint160]int64)
+ for _, recipient := range recipients {
+ sum[recipient.Token] += recipient.Amount
+ }
+ detail := make([]string, 0, len(sum))
+ for token, value := range sum {
+ detail = append(detail, fmt.Sprintf("token=%s amount=%v", token.StringLE(), value))
+ }
+ err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err)
+ }
+ return tx, err
}
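
A worked example of initialCommitteeGASAmount under assumed numbers: with the default N3 initial GAS distribution of 52,000,000 GAS and 7 alphabet wallets,

    const gasFactor = 100_000_000             // fixed8, as in native.GASFactor
    initial := int64(52_000_000) * gasFactor  // assumed InitialGasDistribution
    alphabet := int64(7) * 10_000 * gasFactor // initialAlphabetGASAmount per node
    committee := (initial - alphabet) / 2     // 25_965_000 GAS to the committee account

so half of what remains after funding the alphabet nodes goes to the committee account, which is exactly the threshold transferFundsFinished compares against.
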
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
index b7885c512..50f14e728 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
@@ -2,7 +2,6 @@ package initialize
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -32,7 +31,7 @@ var Cmd = &cobra.Command{
_ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag))
_ = viper.BindPFlag(commonflags.ContainerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag))
_ = viper.BindPFlag(commonflags.WithdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag))
- _ = viper.BindPFlag(constants.ProtoConfigPath, cmd.Flags().Lookup(constants.ProtoConfigPath))
+ _ = viper.BindPFlag(commonflags.ProtoConfigPath, cmd.Flags().Lookup(commonflags.ProtoConfigPath))
},
RunE: initializeSideChainCmd,
}
@@ -48,7 +47,7 @@ func initInitCmd() {
// Defaults are taken from neo-preodolenie.
Cmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee")
Cmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee")
- Cmd.Flags().String(constants.ProtoConfigPath, "", "Path to the consensus node configuration")
+ Cmd.Flags().String(commonflags.ProtoConfigPath, "", "Path to the consensus node configuration")
Cmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
Cmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index df9a03fd1..94223dbd0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -12,6 +12,8 @@ import (
"github.com/spf13/viper"
)
+const deltaFlag = "delta"
+
func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
@@ -30,7 +32,8 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
+ delta, _ := cmd.Flags().GetInt64(deltaFlag)
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
index d8471bb9a..a689e0ec1 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
@@ -13,7 +13,7 @@ import (
)
func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 31fda860e..291482e0f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -12,7 +12,6 @@ var (
Short: "List netmap candidates nodes",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
},
Run: listNetmapCandidatesNodes,
}
@@ -35,6 +34,7 @@ func initForceNewEpochCmd() {
ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
+ ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch by")
}
func init() {
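
With the new delta flag several epochs can be forced at once; command path and spelling assumed from the flag definition above:

    frostfs-adm morph force-new-epoch --delta 3 ...
    # EmitNewEpochCall then emits newEpoch = current + 3; delta defaults to 1,
    # and values below 1 are rejected by the check added in the netmap helper.
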
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
new file mode 100644
index 000000000..14f6eb390
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
@@ -0,0 +1,93 @@
+package nns
+
+import (
+ "math/big"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+func initRegisterCmd() {
+ Cmd.AddCommand(registerCmd)
+ registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+ registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email")
+ registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter")
+ registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
+ registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
+ registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
+ registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
+
+ _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
+}
+
+func registerDomain(cmd *cobra.Command, _ []string) {
+ c, actor := nnsWriter(cmd)
+
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ email, _ := cmd.Flags().GetString(nnsEmailFlag)
+ refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag)
+ retry, _ := cmd.Flags().GetInt64(nnsRetryFlag)
+ expire, _ := cmd.Flags().GetInt64(nnsExpireFlag)
+ ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag)
+
+ h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh),
+ big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl))
+ commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err)
+
+ cmd.Println("Waiting for transaction to persist...")
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
+ cmd.Println("Domain registered successfully")
+}
+
+func initDeleteCmd() {
+ Cmd.AddCommand(deleteCmd)
+ deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+ deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
+
+ _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
+}
+
+func deleteDomain(cmd *cobra.Command, _ []string) {
+ c, actor := nnsWriter(cmd)
+
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ h, vub, err := c.DeleteDomain(name)
+
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
+ cmd.Println("Domain deleted successfully")
+}
+
+func initSetAdminCmd() {
+ Cmd.AddCommand(setAdminCmd)
+ setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+ setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
+ setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
+ _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)
+
+ _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
+}
+
+func setAdmin(cmd *cobra.Command, _ []string) {
+ c, actor := nnsWriter(cmd)
+
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
+ commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
+ h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())
+
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
+ cmd.Println("Set admin successfully")
+}
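
An assumed invocation of the new domain commands (command path and flag spellings inferred from the flag constants, so treat them as illustrative):

    frostfs-adm morph nns register --name example.frostfs -r <endpoint> ...
    frostfs-adm morph nns delete --name example.frostfs ...
    frostfs-adm morph nns set-admin --name example.frostfs --admin-wallet <path> ...
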
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
index 0eaa5ac58..e49f62256 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
@@ -1,25 +1,67 @@
package nns
import (
+ "errors"
+
client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
-func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, util.Uint160) {
+func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
v := viper.GetViper()
- c, err := helper.GetN3Client(v)
+ c, err := helper.NewRemoteClient(v)
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
- ac, err := helper.NewLocalActor(cmd, c)
+ alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag))
+ walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
+ adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))
+
+ var (
+ alphabet *helper.AlphabetWallets
+ regularWallets []*helper.RegularWallets
+ )
+
+ if alphabetWalletPath != "" {
+ alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
+ }
+
+ if walletPath != "" {
+ regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
+ }
+
+ if adminWalletPath != "" {
+ regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
+ }
+
+ if alphabet == nil && regularWallets == nil {
+ commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
+ }
+
+ ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
r := management.NewReader(ac.Invoker)
nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
- return client.New(ac, nnsCs.Hash), ac, nnsCs.Hash
+ return client.New(ac, nnsCs.Hash), ac
+}
+
+func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
+ c, err := helper.NewRemoteClient(viper.GetViper())
+ commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
+
+ inv := invoker.New(c, nil)
+ r := management.NewReader(inv)
+ nnsCs, err := helper.GetContractByID(r, 1)
+ commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
+
+ return client.NewReader(inv, nnsCs.Hash), inv
}
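
Sketch of the resulting split (cmd is the cobra command in scope):

    // read-only paths: no wallets needed, plain invoker
    reader, inv := nnsReader(cmd)
    // state-changing paths: signing accounts resolved from alphabet/regular wallets
    contract, act := nnsWriter(cmd)
    _, _, _, _ = reader, inv, contract, act
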
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
index 0e217eb61..9cb47356f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/spf13/cobra"
)
@@ -20,6 +19,7 @@ func initAddRecordCmd() {
addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
+ addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)
@@ -29,7 +29,6 @@ func initAddRecordCmd() {
func initGetRecordsCmd() {
Cmd.AddCommand(getRecordsCmd)
getRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- getRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
getRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
getRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
@@ -42,13 +41,28 @@ func initDelRecordsCmd() {
delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
+ delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
}
+func initDelRecordCmd() {
+ Cmd.AddCommand(delRecordCmd)
+ delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+ delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
+ delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
+ delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
+
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag)
+}
+
func addRecord(cmd *cobra.Command, _ []string) {
- c, actor, _ := getRPCClient(cmd)
+ c, actor := nnsWriter(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
@@ -64,16 +78,16 @@ func addRecord(cmd *cobra.Command, _ []string) {
}
func getRecords(cmd *cobra.Command, _ []string) {
- c, act, hash := getRPCClient(cmd)
+ c, inv := nnsReader(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
if recordType == "" {
- sid, r, err := unwrap.SessionIterator(act.Invoker.Call(hash, "getAllRecords", name))
+ sid, r, err := c.GetAllRecords(name)
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
defer func() {
- _ = act.Invoker.TerminateSession(sid)
+ _ = inv.TerminateSession(sid)
}()
- items, err := act.Invoker.TraverseIterator(sid, &r, 0)
+ items, err := inv.TraverseIterator(sid, &r, 0)
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
for len(items) != 0 {
for j := range items {
@@ -84,7 +98,7 @@ func getRecords(cmd *cobra.Command, _ []string) {
recordTypeToString(nns.RecordType(rs[1].Value().(*big.Int).Int64())),
string(bs))
}
- items, err = act.Invoker.TraverseIterator(sid, &r, 0)
+ items, err = inv.TraverseIterator(sid, &r, 0)
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
}
} else {
@@ -101,7 +115,7 @@ func getRecords(cmd *cobra.Command, _ []string) {
}
func delRecords(cmd *cobra.Command, _ []string) {
- c, actor, _ := getRPCClient(cmd)
+ c, actor := nnsWriter(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
typ, err := getRecordType(recordType)
@@ -115,6 +129,22 @@ func delRecords(cmd *cobra.Command, _ []string) {
cmd.Println("Records removed successfully")
}
+func delRecord(cmd *cobra.Command, _ []string) {
+ c, actor := nnsWriter(cmd)
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
+ recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
+ typ, err := getRecordType(recordType)
+ commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
+ h, vub, err := c.DeleteRecord(name, typ, data)
+ commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err)
+
+ cmd.Println("Waiting for transaction to persist...")
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "delete records error: %w", err)
+ cmd.Println("Record removed successfully")
+}
+
func getRecordType(recordType string) (*big.Int, error) {
switch strings.ToUpper(recordType) {
case "A":
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/register.go b/cmd/frostfs-adm/internal/modules/morph/nns/register.go
deleted file mode 100644
index d05d9f171..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/register.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package nns
-
-import (
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
-)
-
-func initRegisterCmd() {
- Cmd.AddCommand(registerCmd)
- registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email")
- registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter")
- registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
- registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
- registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
-
- _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
-}
-
-func registerDomain(cmd *cobra.Command, _ []string) {
- c, actor, _ := getRPCClient(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- email, _ := cmd.Flags().GetString(nnsEmailFlag)
- refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag)
- retry, _ := cmd.Flags().GetInt64(nnsRetryFlag)
- expire, _ := cmd.Flags().GetInt64(nnsExpireFlag)
- ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag)
-
- h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh),
- big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl))
- commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
- cmd.Println("Domain registered successfully")
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
index b13092240..53bd943f0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
@@ -14,7 +14,7 @@ func initRenewCmd() {
}
func renewDomain(cmd *cobra.Command, _ []string) {
- c, actor, _ := getRPCClient(cmd)
+ c, actor := nnsWriter(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
h, vub, err := c.Renew(name)
commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
index e528e4b7b..bb84933c6 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
@@ -39,9 +39,20 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: registerDomain,
}
+ deleteCmd = &cobra.Command{
+ Use: "delete",
+ Short: "Delete a domain by name",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
+ },
+ Run: deleteDomain,
+ }
renewCmd = &cobra.Command{
Use: "renew",
Short: "Increases domain expiration date",
@@ -66,6 +77,7 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: addRecord,
}
@@ -83,17 +95,42 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: delRecords,
}
+ delRecordCmd = &cobra.Command{
+ Use: "delete-record",
+ Short: "Removes domain record with the specified type and data",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
+ },
+ Run: delRecord,
+ }
+ setAdminCmd = &cobra.Command{
+ Use: "set-admin",
+ Short: "Sets admin for domain",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
+ _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
+ },
+ Run: setAdmin,
+ }
)
func init() {
initTokensCmd()
initRegisterCmd()
+ initDeleteCmd()
initRenewCmd()
initUpdateCmd()
initAddRecordCmd()
initGetRecordsCmd()
initDelRecordsCmd()
+ initDelRecordCmd()
+ initSetAdminCmd()
}
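Each new subcommand repeats the same PreRun binding because several commands share one viper key: binding at PreRun points the key at the flag set of whichever command actually runs, rather than whichever command bound last at init time. A self-contained sketch of the pattern (names illustrative):

    package main

    import (
    	"fmt"

    	"github.com/spf13/cobra"
    	"github.com/spf13/viper"
    )

    func main() {
    	cmd := &cobra.Command{
    		Use: "demo",
    		PreRun: func(cmd *cobra.Command, _ []string) {
    			// Bind just before Run, as the PreRun hooks above do.
    			_ = viper.BindPFlag("rpc-endpoint", cmd.Flags().Lookup("rpc-endpoint"))
    		},
    		Run: func(cmd *cobra.Command, _ []string) {
    			// Resolves to the flag if set, else any config/env value bound to the key.
    			fmt.Println(viper.GetString("rpc-endpoint"))
    		},
    	}
    	cmd.Flags().StringP("rpc-endpoint", "r", "", "N3 RPC endpoint")
    	_ = cmd.Execute()
    }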
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
index 6e8ffb40a..4ccbb1677 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
@@ -1,24 +1,65 @@
package nns
import (
+ "math/big"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/spf13/cobra"
)
+const (
+ verboseDesc = "Include additional information about CNAME record."
+)
+
func initTokensCmd() {
Cmd.AddCommand(tokensCmd)
tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc)
}
func listTokens(cmd *cobra.Command, _ []string) {
- c, _, _ := getRPCClient(cmd)
+ c, _ := nnsReader(cmd)
it, err := c.Tokens()
commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err)
for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) {
for _, token := range toks {
- cmd.Println(string(token))
+ output := string(token)
+ if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose {
+ cname, err := getCnameRecord(c, token)
+ commonCmd.ExitOnErr(cmd, "", err)
+ if cname != "" {
+ output += " (CNAME: " + cname + ")"
+ }
+ }
+ cmd.Println(output)
}
}
}
+
+func getCnameRecord(c *client.ContractReader, token []byte) (string, error) {
+ items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME)))
+
+ // GetRecords returns the error "not an array" if the domain does not contain records.
+ if err != nil && strings.Contains(err.Error(), "not an array") {
+ return "", nil
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ if len(items) == 0 {
+ return "", nil
+ }
+
+ record, err := items[0].TryBytes()
+ if err != nil {
+ return "", err
+ }
+
+ return string(record), nil
+}
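getCnameRecord leans on two details worth noting: a domain with no records surfaces only as a "not an array" error string from the wrapper (hence the strings.Contains check, which is brittle but the only signal available), and a present record is a stack item whose payload comes out via TryBytes. A small sketch of the decoding step:

    package main

    import (
    	"fmt"

    	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
    )

    func main() {
    	// Roughly what GetRecords hands back for one CNAME entry: a stack item
    	// whose byte representation is the record payload. Domain is illustrative.
    	item := stackitem.Make("alias.frostfs")
    	bs, err := item.TryBytes()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(bs)) // alias.frostfs
    }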
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/update.go b/cmd/frostfs-adm/internal/modules/morph/nns/update.go
index 3437316e3..c6d77ead6 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/update.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/update.go
@@ -30,7 +30,7 @@ func initUpdateCmd() {
}
func updateSOA(cmd *cobra.Command, _ []string) {
- c, actor, _ := getRPCClient(cmd)
+ c, actor := nnsWriter(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
email, _ := cmd.Flags().GetString(nnsEmailFlag)
diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
index 0a19102ba..e47451e0c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go
+++ b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
@@ -53,7 +53,7 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes())
}
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go
index 9b213da4e..3435926c0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go
+++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go
@@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"math/big"
- "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -41,7 +40,8 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
- if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
+ addr, _ := cmd.Flags().GetString(walletAccountFlag)
+ if addr != "" {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
- prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
+ prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@@ -73,23 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return err
}
- till := int64(defaultNotaryDepositLifetime)
- tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
- if err != nil {
- return err
- }
- if tillStr != "" {
- till, err = strconv.ParseInt(tillStr, 10, 64)
- if err != nil || till <= 0 {
- return errInvalidNotaryDepositLifetime
- }
+ till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
+ if till <= 0 {
+ return errInvalidNotaryDepositLifetime
}
return transferGas(cmd, acc, accHash, gasAmount, till)
}
func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
if err != nil {
return err
}
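Switching the flag itself to Int64 (see the notary/root.go hunk below) is what makes the strconv block above removable: pflag rejects non-numeric input during Parse and supplies the default when the flag is omitted, leaving only the till <= 0 domain check. A sketch (5760 stands in for defaultNotaryDepositLifetime, whose actual value this diff does not show):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	till := fs.Int64("till", 5760, "Notary deposit duration in blocks")

    	_ = fs.Parse([]string{}) // flag omitted: the default applies
    	fmt.Println(*till)       // 5760

    	err := fs.Parse([]string{"--till", "ten"}) // non-numeric: rejected by pflag itself
    	fmt.Println(err != nil)                    // true
    }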
diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/root.go b/cmd/frostfs-adm/internal/modules/morph/notary/root.go
index 497ff8ea1..d7be2e503 100644
--- a/cmd/frostfs-adm/internal/modules/morph/notary/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/notary/root.go
@@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
- DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
+ DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
}
func init() {
diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
index 36547e22c..f2932e87c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
@@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
}
func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
- c, err := helper.GetN3Client(viper.GetViper())
+ c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
inv := invoker.New(c, nil)
@@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
- _, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
- _, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
- _, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
+ _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
+ _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
+ _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
_ = tw.Flush()
cmd.Print(buf.String())
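fmt.Appendf (added in Go 1.19) formats directly into a byte slice, so the rewrite above drops the intermediate string that []byte(fmt.Sprintf(...)) allocated. Equivalent output, one conversion fewer:

    package main

    import "fmt"

    func main() {
    	// nil destination: Appendf allocates exactly one slice for the result.
    	buf := fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", 30)
    	fmt.Print(string(buf)) // same bytes Sprintf would have produced
    }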
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
index cb575b657..24cda45a6 100644
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
@@ -20,23 +20,32 @@ const (
accountAddressFlag = "account"
)
+func parseAddresses(cmd *cobra.Command) []util.Uint160 {
+ var addrs []util.Uint160
+
+ accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
+ for _, acc := range accs {
+ addr, err := address.StringToUint160(acc)
+ commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
+
+ addrs = append(addrs, addr)
+ }
+ return addrs
+}
+
func addProxyAccount(cmd *cobra.Command, _ []string) {
- acc, _ := cmd.Flags().GetString(accountAddressFlag)
- addr, err := address.StringToUint160(acc)
- commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
- err = processAccount(cmd, addr, "addAccount")
+ addrs := parseAddresses(cmd)
+ err := processAccount(cmd, addrs, "addAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func removeProxyAccount(cmd *cobra.Command, _ []string) {
- acc, _ := cmd.Flags().GetString(accountAddressFlag)
- addr, err := address.StringToUint160(acc)
- commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
- err = processAccount(cmd, addr, "removeAccount")
+ addrs := parseAddresses(cmd)
+ err := processAccount(cmd, addrs, "removeAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
-func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
+func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
@@ -54,7 +63,9 @@ func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error
}
bw := io.NewBufBinWriter()
- emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
+ for _, addr := range addrs {
+ emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
+ }
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err
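The flag-side counterpart of this change is StringArray (next hunk): a repeatable flag whose values keep their order, which is what lets parseAddresses collect several accounts into a single consensus transaction. A sketch of the parsing behavior alone:

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	accs := fs.StringArray("account", nil, "Wallet address string")

    	// The flag may now repeat; each occurrence appends in order.
    	// Values are illustrative, not valid NEO addresses.
    	_ = fs.Parse([]string{"--account", "NAddrOne", "--account", "NAddrTwo"})
    	fmt.Println(*accs) // [NAddrOne NAddrTwo]
    }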
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
index 082bc57d1..ad89af2b5 100644
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
@@ -29,12 +29,16 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
+ AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
+ _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
+ AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
+ RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
+ _ = RemoveAccountCmd.MarkFlagRequired(accountAddressFlag)
+ RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func init() {
diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go
index defd898c8..cc8225c7a 100644
--- a/cmd/frostfs-adm/internal/modules/root.go
+++ b/cmd/frostfs-adm/internal/modules/root.go
@@ -5,9 +5,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@@ -41,8 +41,8 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
- rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(metabase.RootCmd)
+ rootCmd.AddCommand(maintenance.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go
deleted file mode 100644
index 77183fb49..000000000
--- a/cmd/frostfs-adm/internal/modules/storagecfg/config.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package storagecfg
-
-const configTemplate = `logger:
- level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
-
-node:
- wallet:
- path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
- address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
- password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
- addresses: # list of addresses announced by Storage node in the Network map
- - {{ .AnnouncedAddress }}
- attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
- relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
-
-grpc:
- num: 1 # total number of listener endpoints
- 0:
- endpoint: {{ .Endpoint }} # endpoint for gRPC server
- tls:{{if .TLSCert}}
- enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
- certificate: {{ .TLSCert }} # path to TLS certificate
- key: {{ .TLSKey }} # path to TLS key
- {{- else }}
- enabled: false # disable TLS for a gRPC connection
- {{- end}}
-
-control:
- authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
- {{- range .AuthorizedKeys }}
- - {{.}}{{end}}
- grpc:
- endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
-
-morph:
- dial_timeout: 20s # timeout for side chain NEO RPC client connection
- cache_ttl: 15s # use TTL cache for side chain GET operations
- rpc_endpoint: # side chain N3 RPC endpoints
- {{- range .MorphRPC }}
- - address: wss://{{.}}/ws{{end}}
-{{if not .Relay }}
-storage:
- shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
-
- shard:
- default: # section with the default shard parameters
- metabase:
- perm: 0644 # permissions for metabase files(directories: +x for current user and group)
-
- blobstor:
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
- depth: 2 # max depth of object tree storage in FS
- small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
- compress: true # turn on/off Zstandard compression (level 3) of stored objects
- compression_exclude_content_types:
- - audio/*
- - video/*
-
- blobovnicza:
- size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
- depth: 1 # max depth of object tree storage in key-value DB
- width: 4 # max width of object tree storage in key-value DB
- opened_cache_capacity: 50 # maximum number of opened database files
- opened_cache_ttl: 5m # ttl for opened database file
- opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
-
- gc:
- remover_batch_size: 200 # number of objects to be removed by the garbage collector
- remover_sleep_interval: 5m # frequency of the garbage collector invocation
- 0:
- mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
-
- metabase:
- path: {{ .MetabasePath }} # path to the metabase
-
- blobstor:
- path: {{ .BlobstorPath }} # path to the blobstor
-{{end}}`
-
-const (
- neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
- balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
- neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
- balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
-)
-
-var n3config = map[string]struct {
- MorphRPC []string
- RPC []string
- NeoFSContract string
- BalanceContract string
-}{
- "testnet": {
- MorphRPC: []string{
- "rpc01.morph.testnet.fs.neo.org:51331",
- "rpc02.morph.testnet.fs.neo.org:51331",
- "rpc03.morph.testnet.fs.neo.org:51331",
- "rpc04.morph.testnet.fs.neo.org:51331",
- "rpc05.morph.testnet.fs.neo.org:51331",
- "rpc06.morph.testnet.fs.neo.org:51331",
- "rpc07.morph.testnet.fs.neo.org:51331",
- },
- RPC: []string{
- "rpc01.testnet.n3.nspcc.ru:21331",
- "rpc02.testnet.n3.nspcc.ru:21331",
- "rpc03.testnet.n3.nspcc.ru:21331",
- "rpc04.testnet.n3.nspcc.ru:21331",
- "rpc05.testnet.n3.nspcc.ru:21331",
- "rpc06.testnet.n3.nspcc.ru:21331",
- "rpc07.testnet.n3.nspcc.ru:21331",
- },
- NeoFSContract: neofsTestnetAddress,
- BalanceContract: balanceTestnetAddress,
- },
- "mainnet": {
- MorphRPC: []string{
- "rpc1.morph.fs.neo.org:40341",
- "rpc2.morph.fs.neo.org:40341",
- "rpc3.morph.fs.neo.org:40341",
- "rpc4.morph.fs.neo.org:40341",
- "rpc5.morph.fs.neo.org:40341",
- "rpc6.morph.fs.neo.org:40341",
- "rpc7.morph.fs.neo.org:40341",
- },
- RPC: []string{
- "rpc1.n3.nspcc.ru:10331",
- "rpc2.n3.nspcc.ru:10331",
- "rpc3.n3.nspcc.ru:10331",
- "rpc4.n3.nspcc.ru:10331",
- "rpc5.n3.nspcc.ru:10331",
- "rpc6.n3.nspcc.ru:10331",
- "rpc7.n3.nspcc.ru:10331",
- },
- NeoFSContract: neofsMainnetAddress,
- BalanceContract: balanceMainnetAddress,
- },
-}
diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go
deleted file mode 100644
index 127272da5..000000000
--- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go
+++ /dev/null
@@ -1,433 +0,0 @@
-package storagecfg
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "errors"
- "fmt"
- "math/rand"
- "net"
- "net/url"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "text/template"
- "time"
-
- netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- "github.com/chzyer/readline"
- "github.com/nspcc-dev/neo-go/cli/flags"
- "github.com/nspcc-dev/neo-go/cli/input"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
-
- "github.com/spf13/cobra"
-)
-
-const (
- walletFlag = "wallet"
- accountFlag = "account"
-)
-
-const (
- defaultControlEndpoint = "localhost:8090"
- defaultDataEndpoint = "localhost"
-)
-
-// RootCmd is a root command of config section.
-var RootCmd = &cobra.Command{
- Use: "storage-config [-w wallet] [-a acccount] []",
- Short: "Section for storage node configuration commands",
- Run: storageConfig,
-}
-
-func init() {
- fs := RootCmd.Flags()
-
- fs.StringP(walletFlag, "w", "", "Path to wallet")
- fs.StringP(accountFlag, "a", "", "Wallet account")
-}
-
-type config struct {
- AnnouncedAddress string
- AuthorizedKeys []string
- ControlEndpoint string
- Endpoint string
- TLSCert string
- TLSKey string
- MorphRPC []string
- Attribute struct {
- Locode string
- }
- Wallet struct {
- Path string
- Account string
- Password string
- }
- Relay bool
- BlobstorPath string
- MetabasePath string
-}
-
-func storageConfig(cmd *cobra.Command, args []string) {
- outPath := getOutputPath(args)
-
- historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
- readline.SetHistoryPath(historyPath)
-
- var c config
-
- c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
- if c.Wallet.Path == "" {
- c.Wallet.Path = getPath("Path to the storage node wallet: ")
- }
-
- w, err := wallet.NewWalletFromFile(c.Wallet.Path)
- fatalOnErr(err)
-
- fillWalletAccount(cmd, &c, w)
-
- accH, err := flags.ParseAddress(c.Wallet.Account)
- fatalOnErr(err)
-
- acc := w.GetAccount(accH)
- if acc == nil {
- fatalOnErr(errors.New("can't find account in wallet"))
- }
-
- c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
- fatalOnErr(err)
-
- err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
- fatalOnErr(err)
-
- c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
-
- network := readNetwork(cmd)
-
- c.MorphRPC = n3config[network].MorphRPC
-
- depositGas(cmd, acc, network)
-
- c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
-
- endpoint := getDefaultEndpoint(cmd, &c)
- c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
- if c.Endpoint == "" {
- c.Endpoint = endpoint
- }
-
- c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
- if c.ControlEndpoint == "" {
- c.ControlEndpoint = defaultControlEndpoint
- }
-
- c.TLSCert = getPath("TLS Certificate (optional): ")
- if c.TLSCert != "" {
- c.TLSKey = getPath("TLS Key: ")
- }
-
- c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
- if !c.Relay {
- p := getPath("Path to the storage directory (all available storage will be used): ")
- c.BlobstorPath = filepath.Join(p, "blob")
- c.MetabasePath = filepath.Join(p, "meta")
- }
-
- out := applyTemplate(c)
- fatalOnErr(os.WriteFile(outPath, out, 0o644))
-
- cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
-}
-
-func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
- var addr, port string
- for {
- c.AnnouncedAddress = getString("Publicly announced address: ")
- validator := netutil.Address{}
- err := validator.FromString(c.AnnouncedAddress)
- if err != nil {
- cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
- continue
- }
- uriAddr, err := url.Parse(validator.URIAddr())
- if err != nil {
- panic(fmt.Errorf("unexpected error: %w", err))
- }
- addr = uriAddr.Hostname()
- port = uriAddr.Port()
- ip, err := net.ResolveIPAddr("ip", addr)
- if err != nil {
- cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
- continue
- }
-
- if !ip.IP.IsGlobalUnicast() {
- cmd.Println("IP must be global unicast.")
- continue
- }
- cmd.Printf("Resolved IP address: %s\n", ip.String())
-
- _, err = strconv.ParseUint(port, 10, 16)
- if err != nil {
- cmd.Println("Port must be an integer.")
- continue
- }
-
- break
- }
- return net.JoinHostPort(defaultDataEndpoint, port)
-}
-
-func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
- c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
- if c.Wallet.Account == "" {
- addr := address.Uint160ToString(w.GetChangeAddress())
- c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
- if c.Wallet.Account == "" {
- c.Wallet.Account = addr
- }
- }
-}
-
-func readNetwork(cmd *cobra.Command) string {
- var network string
- for {
- network = getString("Choose network [mainnet]/testnet: ")
- switch network {
- case "":
- network = "mainnet"
- case "testnet", "mainnet":
- default:
- cmd.Println(`Network must be either "mainnet" or "testnet"`)
- continue
- }
- break
- }
- return network
-}
-
-func getOutputPath(args []string) string {
- if len(args) != 0 {
- return args[0]
- }
- outPath := getPath("File to write config at [./config.yml]: ")
- if outPath == "" {
- outPath = "./config.yml"
- }
- return outPath
-}
-
-func getWalletAccount(w *wallet.Wallet, prompt string) string {
- addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
- for i := range w.Accounts {
- addrs[i] = readline.PcItem(w.Accounts[i].Address)
- }
-
- readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
- defer readline.SetAutoComplete(nil)
-
- s, err := readline.Line(prompt)
- fatalOnErr(err)
- return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
-}
-
-func getString(prompt string) string {
- s, err := readline.Line(prompt)
- fatalOnErr(err)
- if s != "" {
- _ = readline.AddHistory(s)
- }
- return s
-}
-
-type filenameCompleter struct{}
-
-func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
- prefix := string(line[:pos])
- dir := filepath.Dir(prefix)
- de, err := os.ReadDir(dir)
- if err != nil {
- return nil, 0
- }
-
- for i := range de {
- name := filepath.Join(dir, de[i].Name())
- if strings.HasPrefix(name, prefix) {
- tail := []rune(strings.TrimPrefix(name, prefix))
- if de[i].IsDir() {
- tail = append(tail, filepath.Separator)
- }
- newLine = append(newLine, tail)
- }
- }
- if pos != 0 {
- return newLine, pos - len([]rune(dir))
- }
- return newLine, 0
-}
-
-func getPath(prompt string) string {
- readline.SetAutoComplete(filenameCompleter{})
- defer readline.SetAutoComplete(nil)
-
- p, err := readline.Line(prompt)
- fatalOnErr(err)
-
- if p == "" {
- return p
- }
-
- _ = readline.AddHistory(p)
-
- abs, err := filepath.Abs(p)
- if err != nil {
- fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
- }
-
- return abs
-}
-
-func getConfirmation(def bool, prompt string) bool {
- for {
- s, err := readline.Line(prompt)
- fatalOnErr(err)
-
- switch strings.ToLower(s) {
- case "y", "yes":
- return true
- case "n", "no":
- return false
- default:
- if len(s) == 0 {
- return def
- }
- }
- }
-}
-
-func applyTemplate(c config) []byte {
- tmpl, err := template.New("config").Parse(configTemplate)
- fatalOnErr(err)
-
- b := bytes.NewBuffer(nil)
- fatalOnErr(tmpl.Execute(b, c))
-
- return b.Bytes()
-}
-
-func fatalOnErr(err error) {
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
- os.Exit(1)
- }
-}
-
-func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
- sideClient := initClient(n3config[network].MorphRPC)
- balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
-
- sideActor, err := actor.NewSimple(sideClient, acc)
- if err != nil {
- fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
- }
-
- sideGas := nep17.NewReader(sideActor, balanceHash)
- accSH := acc.Contract.ScriptHash()
-
- balance, err := sideGas.BalanceOf(accSH)
- if err != nil {
- fatalOnErr(fmt.Errorf("side chain balance: %w", err))
- }
-
- ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
- fixedn.ToString(balance, 12)))
- if !ok {
- return
- }
-
- amountStr := getString("Enter amount in GAS: ")
- amount, err := fixedn.FromString(amountStr, 8)
- if err != nil {
- fatalOnErr(fmt.Errorf("invalid amount: %w", err))
- }
-
- mainClient := initClient(n3config[network].RPC)
- neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
-
- mainActor, err := actor.NewSimple(mainClient, acc)
- if err != nil {
- fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
- }
-
- mainGas := nep17.New(mainActor, gas.Hash)
-
- txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
- if err != nil {
- fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
- }
-
- cmd.Print("Waiting for transactions to persist.")
- tick := time.NewTicker(time.Second / 2)
- defer tick.Stop()
-
- timer := time.NewTimer(time.Second * 20)
- defer timer.Stop()
-
- at := trigger.Application
-
-loop:
- for {
- select {
- case <-tick.C:
- _, err := mainClient.GetApplicationLog(txHash, &at)
- if err == nil {
- cmd.Print("\n")
- break loop
- }
- cmd.Print(".")
- case <-timer.C:
- cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
- if getConfirmation(false, "Continue configuration? yes/[no]: ") {
- return
- }
- os.Exit(1)
- }
- }
-}
-
-func initClient(rpc []string) *rpcclient.Client {
- var c *rpcclient.Client
- var err error
-
- shuffled := make([]string, len(rpc))
- copy(shuffled, rpc)
- rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
-
- for _, endpoint := range shuffled {
- c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
- DialTimeout: time.Second * 2,
- RequestTimeout: time.Second * 5,
- })
- if err != nil {
- continue
- }
- if err = c.Init(); err != nil {
- continue
- }
- return c
- }
-
- fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
- panic("unreachable")
-}
diff --git a/cmd/frostfs-cli/docs/sessions.md b/cmd/frostfs-cli/docs/sessions.md
index 04563b7af..52c0e9b9b 100644
--- a/cmd/frostfs-cli/docs/sessions.md
+++ b/cmd/frostfs-cli/docs/sessions.md
@@ -72,4 +72,3 @@ All other `object` sub-commands support only static sessions (2).
List of commands supporting sessions (static only):
- `create`
- `delete`
-- `set-eacl`
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 03a987a57..299d0a830 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -9,8 +9,6 @@ import (
"io"
"os"
"slices"
- "sort"
- "strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@@ -78,13 +76,29 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
- sort.Slice(list, func(i, j int) bool {
- lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
- return strings.Compare(lhs, rhs) < 0
- })
+ slices.SortFunc(list, cid.ID.Cmp)
return list
}
+func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
+ cliPrm := &client.PrmContainerListStream{
+ XHeaders: prm.XHeaders,
+ OwnerID: prm.OwnerID,
+ Session: prm.Session,
+ }
+ rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
+ if err != nil {
+ return fmt.Errorf("init container list: %w", err)
+ }
+
+ err = rdr.Iterate(processCnr)
+ if err != nil {
+ return fmt.Errorf("read container list: %w", err)
+ }
+
+ return
+}
+
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client
@@ -565,13 +579,6 @@ type HeadObjectPrm struct {
commonObjectPrm
objectAddressPrm
rawPrm
-
- mainOnly bool
-}
-
-// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API.
-func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) {
- x.mainOnly = v
}
// HeadObjectRes groups the resulting values of HeadObject operation.
@@ -666,9 +673,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
for {
n, ok = rdr.Read(buf)
- for i := range n {
- list = append(list, buf[i])
- }
+ list = append(list, buf[:n]...)
if !ok {
break
}
@@ -679,10 +684,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
return nil, fmt.Errorf("read object list: %w", err)
}
- sort.Slice(list, func(i, j int) bool {
- lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
- return strings.Compare(lhs, rhs) < 0
- })
+ slices.SortFunc(list, oid.ID.Cmp)
return &SearchObjectsRes{
ids: list,
@@ -856,6 +858,8 @@ type PatchObjectPrm struct {
ReplaceAttribute bool
+ NewSplitHeader *objectSDK.SplitHeader
+
PayloadPatches []PayloadPatch
}
@@ -886,7 +890,11 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
return nil, fmt.Errorf("init payload reading: %w", err)
}
- if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
+ if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
+ NewSplitHeader: prm.NewSplitHeader,
+ NewAttributes: prm.NewAttributes,
+ ReplaceAttributes: prm.ReplaceAttribute,
+ }) {
for _, pp := range prm.PayloadPatches {
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
if err != nil {
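A sketch of how a caller inside frostfs-cli might drive the new ListContainersStream; prm is assumed to be populated the same way ListContainers' parameters are, and the callback contract is assumed to follow the SDK reader's Iterate convention, where returning true stops iteration:

    // Fragment, assuming ctx, cmd and a populated ListContainersPrm prm.
    seen := 0
    err := internalclient.ListContainersStream(ctx, prm, func(id cid.ID) bool {
    	cmd.Println(id.EncodeToString())
    	seen++
    	return seen >= 1000 // assumption: true terminates the stream early
    })
    commonCmd.ExitOnErr(cmd, "list containers: %w", err)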
diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go
index f7c48b871..1eadfa2e1 100644
--- a/cmd/frostfs-cli/internal/client/sdk.go
+++ b/cmd/frostfs-cli/internal/client/sdk.go
@@ -56,8 +56,9 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
prmDial := client.PrmDial{
Endpoint: addr.URIAddr(),
GRPCDialOptions: []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
+ grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
},
}
if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
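grpc.WaitForReady(true) flips gRPC's default fail-fast behavior: instead of erroring immediately while the channel is still connecting, RPCs block until the connection is READY or the context expires, and as a default call option it applies to every call on the connection. A fragment (grpc.NewClient is the constructor in newer grpc-go; older versions use grpc.Dial; addr and credentials are illustrative):

    conn, err := grpc.NewClient(addr,
    	grpc.WithTransportCredentials(insecure.NewCredentials()),
    	grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), // queue RPCs until READY
    )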
diff --git a/cmd/frostfs-cli/internal/common/tracing.go b/cmd/frostfs-cli/internal/common/tracing.go
index 30c2f2b1a..10863ed1e 100644
--- a/cmd/frostfs-cli/internal/common/tracing.go
+++ b/cmd/frostfs-cli/internal/common/tracing.go
@@ -2,7 +2,7 @@ package common
import (
"context"
- "sort"
+ "slices"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
@@ -45,15 +45,11 @@ func StartClientCommandSpan(cmd *cobra.Command) {
})
commonCmd.ExitOnErr(cmd, "init tracing: %w", err)
- var components sort.StringSlice
+ var components []string
for c := cmd; c != nil; c = c.Parent() {
components = append(components, c.Name())
}
- for i, j := 0, len(components)-1; i < j; {
- components.Swap(i, j)
- i++
- j--
- }
+ slices.Reverse(components)
operation := strings.Join(components, ".")
ctx, span := tracing.StartSpanFromContext(cmd.Context(), operation)
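slices.Reverse (Go 1.21) replaces the hand-rolled index swap: command names are collected child-first while walking cmd.Parent(), and reversing yields the root-first span name. Worked through with illustrative names:

    package main

    import (
    	"fmt"
    	"slices"
    	"strings"
    )

    func main() {
    	// Collected child-first, as the cmd.Parent() walk above produces them.
    	components := []string{"add", "ape-manager", "frostfs-cli"}
    	slices.Reverse(components) // in place
    	fmt.Println(strings.Join(components, ".")) // frostfs-cli.ape-manager.add
    }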
diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go
index 88321176f..6ed21e107 100644
--- a/cmd/frostfs-cli/internal/commonflags/api.go
+++ b/cmd/frostfs-cli/internal/commonflags/api.go
@@ -9,7 +9,7 @@ const (
TTL = "ttl"
TTLShorthand = ""
TTLDefault = 2
- TTLUsage = "TTL value in request meta header"
+ TTLUsage = "The maximum number of intermediate nodes in the request route"
XHeadersKey = "xhdr"
XHeadersShorthand = "x"
diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go
index cd46d63eb..fad1f6183 100644
--- a/cmd/frostfs-cli/internal/commonflags/flags.go
+++ b/cmd/frostfs-cli/internal/commonflags/flags.go
@@ -28,7 +28,7 @@ const (
RPC = "rpc-endpoint"
RPCShorthand = "r"
RPCDefault = ""
- RPCUsage = "Remote node address (as 'multiaddr' or ':')"
+ RPCUsage = "Remote node address (':' or 'grpcs://:')"
Timeout = "timeout"
TimeoutShorthand = "t"
diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
index c6622da25..f4039283f 100644
--- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
@@ -1,45 +1,19 @@
package apemanager
import (
- "encoding/hex"
- "errors"
+ "fmt"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/spf13/cobra"
)
-const (
- chainIDFlag = "chain-id"
- chainIDHexFlag = "chain-id-hex"
- ruleFlag = "rule"
- pathFlag = "path"
-)
-
-const (
- targetNameFlag = "target-name"
- targetNameDesc = "Resource name in APE resource name format"
- targetTypeFlag = "target-type"
- targetTypeDesc = "Resource type(container/namespace)"
-)
-
-const (
- defaultNamespace = ""
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
-)
-
-var errUnknownTargetType = errors.New("unknown target type")
-
var addCmd = &cobra.Command{
Use: "add",
Short: "Add rule chain for a target",
@@ -50,55 +24,28 @@ var addCmd = &cobra.Command{
}
func parseTarget(cmd *cobra.Command) (ct apeSDK.ChainTarget) {
- typ, _ := cmd.Flags().GetString(targetTypeFlag)
- name, _ := cmd.Flags().GetString(targetNameFlag)
+ t := apeCmd.ParseTarget(cmd)
- ct.Name = name
+ ct.Name = t.Name
- switch typ {
- case namespaceTarget:
+ switch t.Type {
+ case engine.Namespace:
ct.TargetType = apeSDK.TargetTypeNamespace
- case containerTarget:
- var cnr cid.ID
- commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
+ case engine.Container:
ct.TargetType = apeSDK.TargetTypeContainer
- case userTarget:
+ case engine.User:
ct.TargetType = apeSDK.TargetTypeUser
- case groupTarget:
+ case engine.Group:
ct.TargetType = apeSDK.TargetTypeGroup
default:
- commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
+ commonCmd.ExitOnErr(cmd, "conversion error: %w", fmt.Errorf("unknown type '%c'", t.Type))
}
return ct
}
func parseChain(cmd *cobra.Command) apeSDK.Chain {
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
- hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
-
- chainIDRaw := []byte(chainID)
-
- if hexEncoded {
- var err error
- chainIDRaw, err = hex.DecodeString(chainID)
- commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
- }
-
- chain := new(apechain.Chain)
- chain.ID = apechain.ID(chainIDRaw)
-
- if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
- commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules))
- } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
- commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath))
- } else {
- commonCmd.ExitOnErr(cmd, "parser error: %w", errors.New("rule is not passed"))
- }
-
- cmd.Println("Parsed chain:")
- util.PrintHumanReadableAPEChain(cmd, chain)
-
- serialized := chain.Bytes()
+ c := apeCmd.ParseChain(cmd)
+ serialized := c.Bytes()
return apeSDK.Chain{
Raw: serialized,
}
@@ -127,13 +74,13 @@ func initAddCmd() {
commonflags.Init(addCmd)
ff := addCmd.Flags()
- ff.StringArray(ruleFlag, []string{}, "Rule statement")
- ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
- ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
- ff.String(targetNameFlag, "", targetNameDesc)
- ff.String(targetTypeFlag, "", targetTypeDesc)
- _ = addCmd.MarkFlagRequired(targetTypeFlag)
- ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
+ ff.String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
+ ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
+ ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = addCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc)
- addCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag)
+ addCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag)
}
diff --git a/cmd/frostfs-cli/modules/ape_manager/list_chain.go b/cmd/frostfs-cli/modules/ape_manager/list_chain.go
index a5dd44614..b07ecc52f 100644
--- a/cmd/frostfs-cli/modules/ape_manager/list_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/list_chain.go
@@ -4,8 +4,8 @@ import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
@@ -35,7 +35,7 @@ func list(cmd *cobra.Command, _ []string) {
for _, respChain := range resp.Chains {
var chain apechain.Chain
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(respChain.Raw))
- apeutil.PrintHumanReadableAPEChain(cmd, &chain)
+ apeCmd.PrintHumanReadableAPEChain(cmd, &chain)
}
}
@@ -43,7 +43,7 @@ func initListCmd() {
commonflags.Init(listCmd)
ff := listCmd.Flags()
- ff.String(targetNameFlag, "", targetNameDesc)
- ff.String(targetTypeFlag, "", targetTypeDesc)
- _ = listCmd.MarkFlagRequired(targetTypeFlag)
+ ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = listCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
}
diff --git a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
index 179bd5c9e..136ca81c3 100644
--- a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
@@ -1,29 +1,23 @@
package apemanager
import (
- "encoding/hex"
- "errors"
-
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
-var (
- errEmptyChainID = errors.New("chain id cannot be empty")
-
- removeCmd = &cobra.Command{
- Use: "remove",
- Short: "Remove rule chain for a target",
- Run: remove,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
- commonflags.Bind(cmd)
- },
- }
-)
+var removeCmd = &cobra.Command{
+ Use: "remove",
+ Short: "Remove rule chain for a target",
+ Run: remove,
+ PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ commonflags.Bind(cmd)
+ },
+}
func remove(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
@@ -31,19 +25,9 @@ func remove(cmd *cobra.Command, _ []string) {
key := key.Get(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
- if chainID == "" {
- commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID)
- }
+ chainID := apeCmd.ParseChainID(cmd)
chainIDRaw := []byte(chainID)
- hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
- if hexEncoded {
- var err error
- chainIDRaw, err = hex.DecodeString(chainID)
- commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
- }
-
_, err := cli.APEManagerRemoveChain(cmd.Context(), client_sdk.PrmAPEManagerRemoveChain{
ChainTarget: target,
ChainID: chainIDRaw,
@@ -58,9 +42,10 @@ func initRemoveCmd() {
commonflags.Init(removeCmd)
ff := removeCmd.Flags()
- ff.String(targetNameFlag, "", targetNameDesc)
- ff.String(targetTypeFlag, "", targetTypeDesc)
- _ = removeCmd.MarkFlagRequired(targetTypeFlag)
- ff.String(chainIDFlag, "", "Chain id")
- ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = removeCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
+ _ = removeCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
+ ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc)
}
diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go
index a86506c37..0927788ba 100644
--- a/cmd/frostfs-cli/modules/bearer/create.go
+++ b/cmd/frostfs-cli/modules/bearer/create.go
@@ -44,6 +44,7 @@ is set to current epoch + n.
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account))
+ _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC))
},
}
@@ -81,7 +82,7 @@ func createToken(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)
if iatRelative || expRelative || nvbRelative {
- endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
+ endpoint := viper.GetString(commonflags.RPC)
if len(endpoint) == 0 {
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
}
diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go
index 482c0027e..9632061f1 100644
--- a/cmd/frostfs-cli/modules/bearer/generate_override.go
+++ b/cmd/frostfs-cli/modules/bearer/generate_override.go
@@ -1,31 +1,20 @@
package bearer
import (
- "errors"
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
-var (
- errChainIDCannotBeEmpty = errors.New("chain id cannot be empty")
- errRuleIsNotParsed = errors.New("rule is not passed")
-)
-
const (
- chainIDFlag = "chain-id"
- chainIDHexFlag = "chain-id-hex"
- ruleFlag = "rule"
- pathFlag = "path"
- outputFlag = "output"
+ outputFlag = "output"
)
var generateAPEOverrideCmd = &cobra.Command{
@@ -40,7 +29,7 @@ Generated APE override can be dumped to a file in JSON format that is passed to
}
func genereateAPEOverride(cmd *cobra.Command, _ []string) {
- c := parseChain(cmd)
+ c := apeCmd.ParseChain(cmd)
targetCID, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cid cidSDK.ID
@@ -63,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" {
- err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
+ err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else {
fmt.Print("\n")
@@ -77,39 +66,11 @@ func init() {
ff.StringP(commonflags.CIDFlag, "", "", "Target container ID.")
_ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.CIDFlag)
- ff.StringArray(ruleFlag, []string{}, "Rule statement")
- ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
- ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
- ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement")
+ ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format")
+ ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain")
+ ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
ff.String(outputFlag, "", "Output path to dump result JSON-encoded APE override")
_ = cobra.MarkFlagFilename(createCmd.Flags(), outputFlag)
}
-
-func parseChainID(cmd *cobra.Command) apechain.ID {
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
- if chainID == "" {
- commonCmd.ExitOnErr(cmd, "read chain id error: %w",
- errChainIDCannotBeEmpty)
- }
- return apechain.ID(chainID)
-}
-
-func parseChain(cmd *cobra.Command) *apechain.Chain {
- chain := new(apechain.Chain)
-
- if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
- commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules))
- } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
- commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath))
- } else {
- commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
- }
-
- chain.ID = parseChainID(cmd)
-
- cmd.Println("Parsed chain:")
- parseutil.PrintHumanReadableAPEChain(cmd, chain)
-
- return chain
-}
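
The chain-parsing helpers deleted here (and their twin in control/add_rule.go further down) are consolidated into the shared cmd/internal/common/ape package that callers now import as apeCmd. A sketch of the consolidated helper, reconstructed from the code removed above; the parse/print primitives are assumed to move into the same package, and the package's real source may differ in details:

    package ape

    import (
        "errors"

        commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
        apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
        "github.com/spf13/cobra"
    )

    const (
        RuleFlag       = "rule"
        PathFlag       = "path"
        ChainIDFlag    = "chain-id"
        ChainIDHexFlag = "chain-id-hex"
    )

    var errRuleIsNotParsed = errors.New("rule is not passed")

    // ParseChain reads a chain from --rule statements or an encoded file,
    // assigns its ID and echoes the parsed result, as the deleted per-command
    // copies did. ParseAPEChain, ParseAPEChainBinaryOrJSON, ParseChainID and
    // PrintHumanReadableAPEChain are assumed to live alongside this helper.
    func ParseChain(cmd *cobra.Command) *apechain.Chain {
        chain := new(apechain.Chain)

        if rules, _ := cmd.Flags().GetStringArray(RuleFlag); len(rules) > 0 {
            commonCmd.ExitOnErr(cmd, "parser error: %w", ParseAPEChain(chain, rules))
        } else if encPath, _ := cmd.Flags().GetString(PathFlag); encPath != "" {
            commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", ParseAPEChainBinaryOrJSON(chain, encPath))
        } else {
            commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
        }

        chain.ID = ParseChainID(cmd)

        cmd.Println("Parsed chain:")
        PrintHumanReadableAPEChain(cmd, chain)

        return chain
    }
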
diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go
index f37b169ce..30f995180 100644
--- a/cmd/frostfs-cli/modules/container/create.go
+++ b/cmd/frostfs-cli/modules/container/create.go
@@ -7,22 +7,20 @@ import (
"strings"
"time"
- containerApi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ containerApi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
)
var (
- containerACL string
containerPolicy string
containerAttributes []string
containerAwait bool
@@ -89,9 +87,6 @@ It will be stored in sidechain when inner ring will accepts it.`,
err = parseAttributes(&cnr, containerAttributes)
commonCmd.ExitOnErr(cmd, "", err)
- var basicACL acl.Basic
- commonCmd.ExitOnErr(cmd, "decode basic ACL string: %w", basicACL.DecodeString(containerACL))
-
tok := getSession(cmd)
if tok != nil {
@@ -105,7 +100,6 @@ It will be stored in sidechain when inner ring will accepts it.`,
}
cnr.SetPlacementPolicy(*placementPolicy)
- cnr.SetBasicACL(basicACL)
var syncContainerPrm internalclient.SyncContainerPrm
syncContainerPrm.SetClient(cli)
@@ -163,10 +157,6 @@ func initContainerCreateCmd() {
flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
-
- flags.StringVar(&containerACL, "basic-acl", acl.NamePrivate, fmt.Sprintf("HEX encoded basic ACL value or keywords like '%s', '%s', '%s'",
- acl.NamePublicRW, acl.NamePrivate, acl.NamePublicROExtended,
- ))
flags.StringVarP(&containerPolicy, "policy", "p", "", "QL-encoded or JSON-encoded placement policy or path to file with it")
flags.StringSliceVarP(&containerAttributes, "attributes", "a", nil, "Comma separated pairs of container attributes in form of Key1=Value1,Key2=Value2")
flags.BoolVar(&containerAwait, "await", false, "Block execution until container is persisted")
diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go
index 8c4ab14f8..fac6eb2cd 100644
--- a/cmd/frostfs-cli/modules/container/get.go
+++ b/cmd/frostfs-cli/modules/container/get.go
@@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod
cmd.Println("created:", container.CreatedAt(cnr))
cmd.Println("attributes:")
- cnr.IterateAttributes(func(key, val string) {
+ for key, val := range cnr.Attributes() {
cmd.Printf("\t%s=%s\n", key, val)
- })
+ }
cmd.Println("placement policy:")
commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd)))
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index f01e4db4d..e4a023d91 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -6,8 +6,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// flags of list command.
@@ -51,44 +54,60 @@ var listContainersCmd = &cobra.Command{
var prm internalclient.ListContainersPrm
prm.SetClient(cli)
- prm.Account = idUser
-
- res, err := internalclient.ListContainers(cmd.Context(), prm)
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
+ prm.OwnerID = idUser
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
+ var containerIDs []cid.ID
+
+ err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
+ printContainer(cmd, prmGet, id)
+ return false
+ })
+ if err == nil {
+ return
+ }
+
+ if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
+ res, err := internalclient.ListContainers(cmd.Context(), prm)
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+ containerIDs = res.SortedIDList()
+ } else {
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+ }
- containerIDs := res.SortedIDList()
for _, cnrID := range containerIDs {
- if flagVarListName == "" && !flagVarListPrintAttr {
- cmd.Println(cnrID.String())
- continue
- }
-
- prmGet.ClientParams.ContainerID = &cnrID
- res, err := internalclient.GetContainer(cmd.Context(), prmGet)
- if err != nil {
- cmd.Printf(" failed to read attributes: %v\n", err)
- continue
- }
-
- cnr := res.Container()
- if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
- continue
- }
- cmd.Println(cnrID.String())
-
- if flagVarListPrintAttr {
- cnr.IterateUserAttributes(func(key, val string) {
- cmd.Printf(" %s: %s\n", key, val)
- })
- }
+ printContainer(cmd, prmGet, cnrID)
}
},
}
+func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
+ if flagVarListName == "" && !flagVarListPrintAttr {
+ cmd.Println(id.String())
+ return
+ }
+
+ prmGet.ClientParams.ContainerID = &id
+ res, err := internalclient.GetContainer(cmd.Context(), prmGet)
+ if err != nil {
+ cmd.Printf(" failed to read attributes: %v\n", err)
+ return
+ }
+
+ cnr := res.Container()
+ if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
+ return
+ }
+ cmd.Println(id.String())
+
+ if flagVarListPrintAttr {
+ for key, val := range cnr.Attributes() {
+ cmd.Printf(" %s: %s\n", key, val)
+ }
+ }
+}
+
func initContainerListContainersCmd() {
commonflags.Init(listContainersCmd)
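
The rewritten listing first tries the streaming RPC and falls back to the old unary ListContainers only when the server answers codes.Unimplemented, so a new CLI keeps working against nodes that predate the stream. A self-contained sketch of the fallback pattern, with hypothetical stub RPCs standing in for the real clients:

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func listViaStream(_ context.Context, _ func(string) bool) error {
        return status.Error(codes.Unimplemented, "stream not supported")
    }

    func listViaUnary(_ context.Context) ([]string, error) {
        return []string{"container-1", "container-2"}, nil
    }

    func list(ctx context.Context) error {
        err := listViaStream(ctx, func(id string) bool {
            fmt.Println(id)
            return false // false means "keep streaming", as in the callback above
        })
        if err == nil {
            return nil
        }
        // Old servers do not implement the stream: retry with the unary RPC.
        if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
            ids, uerr := listViaUnary(ctx)
            if uerr != nil {
                return uerr
            }
            for _, id := range ids {
                fmt.Println(id)
            }
            return nil
        }
        return err
    }

    func main() {
        if err := list(context.Background()); err != nil {
            fmt.Println("error:", err)
        }
    }
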
diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go
index ff2f8cf45..d5850359d 100644
--- a/cmd/frostfs-cli/modules/container/list_objects.go
+++ b/cmd/frostfs-cli/modules/container/list_objects.go
@@ -1,9 +1,6 @@
package container
import (
- "strings"
-
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@@ -67,14 +64,8 @@ var listContainerObjectsCmd = &cobra.Command{
resHead, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
- attrs := resHead.Header().Attributes()
- for i := range attrs {
- attrKey := attrs[i].Key()
- if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, v2object.SysAttributePrefixNeoFS) {
- // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
- // Use dedicated method to skip system attributes.
- cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value())
- }
+ for _, attr := range resHead.Header().UserAttributes() {
+ cmd.Printf(" %s: %s\n", attr.Key(), attr.Value())
}
} else {
cmd.Printf(" failed to read attributes: %v\n", err)
diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go
index 095ab6438..cf4862b4a 100644
--- a/cmd/frostfs-cli/modules/container/policy_playground.go
+++ b/cmd/frostfs-cli/modules/container/policy_playground.go
@@ -1,12 +1,13 @@
package container
import (
- "bufio"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
- "io"
+ "maps"
"os"
+ "slices"
"strings"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@@ -14,20 +15,22 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/chzyer/readline"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
type policyPlaygroundREPL struct {
- cmd *cobra.Command
- nodes map[string]netmap.NodeInfo
+ cmd *cobra.Command
+ nodes map[string]netmap.NodeInfo
+ console *readline.Instance
}
-func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
+func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
return &policyPlaygroundREPL{
cmd: cmd,
nodes: map[string]netmap.NodeInfo{},
- }, nil
+ }
}
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@@ -37,10 +40,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error {
i := 1
for id, node := range repl.nodes {
var attrs []string
- node.IterateAttributes(func(k, v string) {
+ for k, v := range node.Attributes() {
attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
- })
- fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
+ }
+ fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
i++
}
return nil
@@ -147,12 +150,29 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error {
for _, node := range ns {
ids = append(ids, hex.EncodeToString(node.PublicKey()))
}
- fmt.Printf("\t%2d: %v\n", i+1, ids)
+ fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids)
}
return nil
}
+func (repl *policyPlaygroundREPL) handleHelp(args []string) error {
+ if len(args) != 0 {
+ if _, ok := commands[args[0]]; !ok {
+ return fmt.Errorf("unknown command: %q", args[0])
+ }
+ fmt.Fprintln(repl.console, commands[args[0]].usage)
+ return nil
+ }
+
+ commandList := slices.Collect(maps.Keys(commands))
+ slices.Sort(commandList)
+ for _, command := range commandList {
+ fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].description)
+ }
+ return nil
+}
+
func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
var nm netmap.NetMap
var nodes []netmap.NodeInfo
@@ -163,6 +183,105 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
return nm
}
+type commandDescription struct {
+ description string
+ usage string
+}
+
+var commands = map[string]commandDescription{
+ "list": {
+ descriprion: "Display all nodes in the netmap",
+ usage: `Display all nodes in the netmap
+Example of usage:
+ list
+ 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
+ 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
+`,
+ },
+
+ "ls": {
+ descriprion: "Display all nodes in the netmap",
+ usage: `Display all nodes in the netmap
+Example of usage:
+ ls
+ 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
+ 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
+`,
+ },
+
+ "add": {
+ descriprion: "Add a new node: add attr=value",
+ usage: `Add a new node
+Example of usage:
+ add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`,
+ },
+
+ "load": {
+ descriprion: "Load netmap from file: load ",
+ usage: `Load netmap from file
+Example of usage:
+ load "netmap.json"
+File format (netmap.json):
+{
+ "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": {
+ "continent": "Europe",
+ "country": "Poland"
+ },
+ "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": {
+ "continent": "Antarctica",
+ "country": "Heard Island"
+ }
+}`,
+ },
+
+ "remove": {
+ descriprion: "Remove a node: remove ",
+ usage: `Remove a node
+Example of usage:
+ remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
+ },
+
+ "rm": {
+ descriprion: "Remove a node: rm ",
+ usage: `Remove a node
+Example of usage:
+ rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
+ },
+
+ "eval": {
+ descriprion: "Evaluate a policy: eval ",
+ usage: `Evaluate a policy
+Example of usage:
+ eval REP 2`,
+ },
+
+ "help": {
+ descriprion: "Show available commands",
+ },
+}
+
+func (repl *policyPlaygroundREPL) handleCommand(args []string) error {
+ if len(args) == 0 {
+ return nil
+ }
+
+ switch args[0] {
+ case "list", "ls":
+ return repl.handleLs(args[1:])
+ case "add":
+ return repl.handleAdd(args[1:])
+ case "load":
+ return repl.handleLoad(args[1:])
+ case "remove", "rm":
+ return repl.handleRemove(args[1:])
+ case "eval":
+ return repl.handleEval(args[1:])
+ case "help":
+ return repl.handleHelp(args[1:])
+ }
+ return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0])
+}
+
func (repl *policyPlaygroundREPL) run() error {
if len(viper.GetString(commonflags.RPC)) > 0 {
key := key.GetOrGenerate(repl.cmd)
@@ -180,36 +299,51 @@ func (repl *policyPlaygroundREPL) run() error {
}
}
- cmdHandlers := map[string]func([]string) error{
- "list": repl.handleLs,
- "ls": repl.handleLs,
- "add": repl.handleAdd,
- "load": repl.handleLoad,
- "remove": repl.handleRemove,
- "rm": repl.handleRemove,
- "eval": repl.handleEval,
+ if len(viper.GetString(netmapConfigPath)) > 0 {
+ err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)})
+ commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err)
}
- for reader := bufio.NewReader(os.Stdin); ; {
- fmt.Print("> ")
- line, err := reader.ReadString('\n')
+
+ var cfgCompleter []readline.PrefixCompleterInterface
+ var helpSubItems []readline.PrefixCompleterInterface
+
+ for name := range commands {
+ if name != "help" {
+ cfgCompleter = append(cfgCompleter, readline.PcItem(name))
+ helpSubItems = append(helpSubItems, readline.PcItem(name))
+ }
+ }
+
+ cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...))
+ completer := readline.NewPrefixCompleter(cfgCompleter...)
+ rl, err := readline.NewEx(&readline.Config{
+ Prompt: "> ",
+ InterruptPrompt: "^C",
+ AutoComplete: completer,
+ })
+ if err != nil {
+ return fmt.Errorf("error initializing readline: %w", err)
+ }
+ repl.console = rl
+ defer rl.Close()
+
+ var exit bool
+ for {
+ line, err := rl.Readline()
if err != nil {
- if err == io.EOF {
- return nil
+ if errors.Is(err, readline.ErrInterrupt) {
+ if exit {
+ return nil
+ }
+ exit = true
+ continue
}
- return fmt.Errorf("reading line: %v", err)
+ return fmt.Errorf("reading line: %w", err)
}
- parts := strings.Fields(line)
- if len(parts) == 0 {
- continue
- }
- cmd := parts[0]
- handler, exists := cmdHandlers[cmd]
- if exists {
- if err := handler(parts[1:]); err != nil {
- fmt.Printf("error: %v\n", err)
- }
- } else {
- fmt.Printf("error: unknown command %q\n", cmd)
+ exit = false
+
+ if err := repl.handleCommand(strings.Fields(line)); err != nil {
+ fmt.Fprintf(repl.console, "error: %v\n", err)
}
}
}
@@ -220,12 +354,19 @@ var policyPlaygroundCmd = &cobra.Command{
Long: `A REPL for testing placement policies.
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
Run: func(cmd *cobra.Command, _ []string) {
- repl, err := newPolicyPlaygroundREPL(cmd)
- commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
+ repl := newPolicyPlaygroundREPL(cmd)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},
}
+const (
+ netmapConfigPath = "netmap-config"
+ netmapConfigUsage = "Path to the netmap configuration file"
+)
+
func initContainerPolicyPlaygroundCmd() {
commonflags.Init(policyPlaygroundCmd)
+ policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage)
+
+ _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath))
}
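
The bufio.Reader prompt is replaced with github.com/chzyer/readline, which brings tab completion driven by the commands map and the two-consecutive-Ctrl-C exit implemented via the exit flag. A trimmed, standalone sketch of the same loop:

    package main

    import (
        "errors"
        "fmt"
        "strings"

        "github.com/chzyer/readline"
    )

    func main() {
        completer := readline.NewPrefixCompleter(
            readline.PcItem("eval"),
            readline.PcItem("help", readline.PcItem("eval")),
        )
        rl, err := readline.NewEx(&readline.Config{
            Prompt:          "> ",
            InterruptPrompt: "^C",
            AutoComplete:    completer,
        })
        if err != nil {
            fmt.Println("error initializing readline:", err)
            return
        }
        defer rl.Close()

        var exit bool // one ^C arms the exit, a second ^C in a row quits
        for {
            line, err := rl.Readline()
            if err != nil {
                if errors.Is(err, readline.ErrInterrupt) {
                    if exit {
                        return
                    }
                    exit = true
                    continue
                }
                return // e.g. io.EOF on ^D
            }
            exit = false
            fmt.Fprintf(rl, "tokens: %v\n", strings.Fields(line))
        }
    }
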
diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go
index a22d0525d..42f229ad9 100644
--- a/cmd/frostfs-cli/modules/control/add_rule.go
+++ b/cmd/frostfs-cli/modules/control/add_rule.go
@@ -1,23 +1,14 @@
package control
import (
- "encoding/hex"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
-const (
- ruleFlag = "rule"
- pathFlag = "path"
-)
-
var addRuleCmd = &cobra.Command{
Use: "add-rule",
Short: "Add local override",
@@ -31,41 +22,12 @@ control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ..
Run: addRule,
}
-func parseChain(cmd *cobra.Command) *apechain.Chain {
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
- hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
-
- chainIDRaw := []byte(chainID)
-
- if hexEncoded {
- var err error
- chainIDRaw, err = hex.DecodeString(chainID)
- commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
- }
-
- chain := new(apechain.Chain)
- chain.ID = apechain.ID(chainIDRaw)
-
- if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
- commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules))
- } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
- commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath))
- } else {
- commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed"))
- }
-
- cmd.Println("Parsed chain:")
- util.PrintHumanReadableAPEChain(cmd, chain)
-
- return chain
-}
-
func addRule(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
target := parseTarget(cmd)
- parsed := parseChain(cmd)
+ parsed := apeCmd.ParseChain(cmd)
req := &control.AddChainLocalOverrideRequest{
Body: &control.AddChainLocalOverrideRequest_Body{
@@ -94,13 +56,13 @@ func initControlAddRuleCmd() {
initControlFlags(addRuleCmd)
ff := addRuleCmd.Flags()
- ff.StringArray(ruleFlag, []string{}, "Rule statement")
- ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
- ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
- ff.String(targetNameFlag, "", targetNameDesc)
- ff.String(targetTypeFlag, "", targetTypeDesc)
- _ = addRuleCmd.MarkFlagRequired(targetTypeFlag)
- ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement")
+ ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format")
+ ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain")
+ ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = addRuleCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
- addRuleCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag)
+ addRuleCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/detach_shards.go b/cmd/frostfs-cli/modules/control/detach_shards.go
index 5e5b60c3d..025a6e561 100644
--- a/cmd/frostfs-cli/modules/control/detach_shards.go
+++ b/cmd/frostfs-cli/modules/control/detach_shards.go
@@ -1,10 +1,10 @@
package control
import (
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/doctor.go b/cmd/frostfs-cli/modules/control/doctor.go
index 13bb81a0a..632cdd6a7 100644
--- a/cmd/frostfs-cli/modules/control/doctor.go
+++ b/cmd/frostfs-cli/modules/control/doctor.go
@@ -1,10 +1,10 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/drop_objects.go b/cmd/frostfs-cli/modules/control/drop_objects.go
index 8c0bb2332..dcc1c1229 100644
--- a/cmd/frostfs-cli/modules/control/drop_objects.go
+++ b/cmd/frostfs-cli/modules/control/drop_objects.go
@@ -1,10 +1,10 @@
package control
import (
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go
deleted file mode 100644
index 458e4cc0b..000000000
--- a/cmd/frostfs-cli/modules/control/evacuate_shard.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package control
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "github.com/spf13/cobra"
-)
-
-const ignoreErrorsFlag = "no-errors"
-
-var evacuateShardCmd = &cobra.Command{
- Use: "evacuate",
- Short: "Evacuate objects from shard",
- Long: "Evacuate objects from shard to other shards",
- Run: evacuateShard,
- Deprecated: "use frostfs-cli control shards evacuation start",
-}
-
-func evacuateShard(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
- req.Body.Shard_ID = getShardIDList(cmd)
- req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.EvacuateShardResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.EvacuateShard(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- cmd.Println("Shard has successfully been evacuated.")
-}
-
-func initControlEvacuateShardCmd() {
- initControlFlags(evacuateShardCmd)
-
- flags := evacuateShardCmd.Flags()
- flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
- flags.Bool(shardAllFlag, false, "Process all shards")
- flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
-
- evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
-}
diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go
index 6fa5ed75c..b8d7eb046 100644
--- a/cmd/frostfs-cli/modules/control/evacuation.go
+++ b/cmd/frostfs-cli/modules/control/evacuation.go
@@ -7,19 +7,24 @@ import (
"sync/atomic"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
const (
- awaitFlag = "await"
- noProgressFlag = "no-progress"
- scopeFlag = "scope"
+ awaitFlag = "await"
+ noProgressFlag = "no-progress"
+ scopeFlag = "scope"
+ repOneOnlyFlag = "rep-one-only"
+ ignoreErrorsFlag = "no-errors"
+
+ containerWorkerCountFlag = "container-worker-count"
+ objectWorkerCountFlag = "object-worker-count"
scopeAll = "all"
scopeObjects = "objects"
@@ -64,12 +69,18 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
+ containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag)
+ objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag)
+ repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag)
req := &control.StartShardEvacuationRequest{
Body: &control.StartShardEvacuationRequest_Body{
- Shard_ID: getShardIDList(cmd),
- IgnoreErrors: ignoreErrors,
- Scope: getEvacuationScope(cmd),
+ Shard_ID: getShardIDList(cmd),
+ IgnoreErrors: ignoreErrors,
+ Scope: getEvacuationScope(cmd),
+ ContainerWorkerCount: containerWorkerCount,
+ ObjectWorkerCount: objectWorkerCount,
+ RepOneOnly: repOneOnly,
},
}
@@ -285,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
leftMinutes := int(leftSeconds / 60)
- sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
+ fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes)
}
func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@@ -294,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
hour := int(duration.Seconds() / 3600)
minute := int(duration.Seconds()/60) % 60
second := int(duration.Seconds()) % 60
- sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
+ fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second)
}
}
func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if resp.GetBody().GetStartedAt() != nil {
startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
- sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
+ fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339))
}
}
func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if len(resp.GetBody().GetErrorMessage()) > 0 {
- sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
+ fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage())
}
}
@@ -321,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes
default:
status = "undefined"
}
- sb.WriteString(fmt.Sprintf(" Status: %s.", status))
+ fmt.Fprintf(sb, " Status: %s.", status)
}
func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@@ -339,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
}
func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
- sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
+ fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
resp.GetBody().GetEvacuatedObjects(),
resp.GetBody().GetTotalObjects(),
resp.GetBody().GetFailedObjects(),
resp.GetBody().GetSkippedObjects(),
resp.GetBody().GetEvacuatedTrees(),
resp.GetBody().GetTotalTrees(),
- resp.GetBody().GetFailedTrees()))
+ resp.GetBody().GetFailedTrees())
}
func initControlEvacuationShardCmd() {
@@ -371,6 +382,9 @@ func initControlStartEvacuationShardCmd() {
flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll))
flags.Bool(awaitFlag, false, "Block execution until evacuation is completed")
flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
+ flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers")
+ flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers")
+ flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'")
startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
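
The WriteString(fmt.Sprintf(...)) rewrites in this file lean on strings.Builder implementing io.Writer: fmt.Fprintf formats straight into the builder and skips the intermediate string allocation. In short:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        var sb strings.Builder
        // Before: sb.WriteString(fmt.Sprintf(" Status: %s.", "completed"))
        // After: format directly into the builder, no temporary string.
        fmt.Fprintf(&sb, " Status: %s.", "completed")
        fmt.Println(sb.String())
    }
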
diff --git a/cmd/frostfs-cli/modules/control/flush_cache.go b/cmd/frostfs-cli/modules/control/flush_cache.go
index 541961903..280aacfad 100644
--- a/cmd/frostfs-cli/modules/control/flush_cache.go
+++ b/cmd/frostfs-cli/modules/control/flush_cache.go
@@ -1,10 +1,10 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go
index 050cf165c..4da903a9a 100644
--- a/cmd/frostfs-cli/modules/control/get_rule.go
+++ b/cmd/frostfs-cli/modules/control/get_rule.go
@@ -3,11 +3,11 @@ package control
import (
"encoding/hex"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
@@ -24,8 +24,8 @@ func getRule(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
- hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
+ chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag)
+ hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag)
if hexEncoded {
chainIDBytes, err := hex.DecodeString(chainID)
@@ -56,16 +56,16 @@ func getRule(cmd *cobra.Command, _ []string) {
var chain apechain.Chain
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain()))
- util.PrintHumanReadableAPEChain(cmd, &chain)
+ apecmd.PrintHumanReadableAPEChain(cmd, &chain)
}
func initControGetRuleCmd() {
initControlFlags(getRuleCmd)
ff := getRuleCmd.Flags()
- ff.String(targetNameFlag, "", targetNameDesc)
- ff.String(targetTypeFlag, "", targetTypeDesc)
- _ = getRuleCmd.MarkFlagRequired(targetTypeFlag)
- ff.String(chainIDFlag, "", "Chain id")
- ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc)
+ ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc)
+ _ = getRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag)
+ ff.String(apecmd.ChainIDFlag, "", "Chain id")
+ ff.Bool(apecmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
}
diff --git a/cmd/frostfs-cli/modules/control/healthcheck.go b/cmd/frostfs-cli/modules/control/healthcheck.go
index 2241a403f..1d4441f1e 100644
--- a/cmd/frostfs-cli/modules/control/healthcheck.go
+++ b/cmd/frostfs-cli/modules/control/healthcheck.go
@@ -3,11 +3,11 @@ package control
import (
"os"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/ir_healthcheck.go b/cmd/frostfs-cli/modules/control/ir_healthcheck.go
index 4f272c1b4..373f21c30 100644
--- a/cmd/frostfs-cli/modules/control/ir_healthcheck.go
+++ b/cmd/frostfs-cli/modules/control/ir_healthcheck.go
@@ -3,12 +3,12 @@ package control
import (
"os"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/ir_remove_container.go b/cmd/frostfs-cli/modules/control/ir_remove_container.go
index a66d7e06d..460e299e5 100644
--- a/cmd/frostfs-cli/modules/control/ir_remove_container.go
+++ b/cmd/frostfs-cli/modules/control/ir_remove_container.go
@@ -1,13 +1,13 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
diff --git a/cmd/frostfs-cli/modules/control/ir_remove_node.go b/cmd/frostfs-cli/modules/control/ir_remove_node.go
index 412dc7934..2fe686d63 100644
--- a/cmd/frostfs-cli/modules/control/ir_remove_node.go
+++ b/cmd/frostfs-cli/modules/control/ir_remove_node.go
@@ -4,11 +4,11 @@ import (
"encoding/hex"
"errors"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
index 6965b5dca..5f09e92c1 100644
--- a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
+++ b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
@@ -1,11 +1,11 @@
package control
import (
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go
index f5fc27bda..a6c65d083 100644
--- a/cmd/frostfs-cli/modules/control/list_rules.go
+++ b/cmd/frostfs-cli/modules/control/list_rules.go
@@ -1,18 +1,16 @@
package control
import (
- "errors"
"fmt"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/nspcc-dev/neo-go/cli/input"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/spf13/cobra"
)
@@ -23,65 +21,25 @@ var listRulesCmd = &cobra.Command{
Run: listRules,
}
-const (
- defaultNamespace = "root"
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
-)
-
-const (
- targetNameFlag = "target-name"
- targetNameDesc = "Resource name in APE resource name format"
- targetTypeFlag = "target-type"
- targetTypeDesc = "Resource type(container/namespace)"
-)
-
-var (
- errSettingDefaultValueWasDeclined = errors.New("setting default value was declined")
- errUnknownTargetType = errors.New("unknown target type")
-)
+var engineToControlSvcType = map[policyengine.TargetType]control.ChainTarget_TargetType{
+ policyengine.Namespace: control.ChainTarget_NAMESPACE,
+ policyengine.Container: control.ChainTarget_CONTAINER,
+ policyengine.User: control.ChainTarget_USER,
+ policyengine.Group: control.ChainTarget_GROUP,
+}
func parseTarget(cmd *cobra.Command) *control.ChainTarget {
- typ, _ := cmd.Flags().GetString(targetTypeFlag)
- name, _ := cmd.Flags().GetString(targetNameFlag)
- switch typ {
- case namespaceTarget:
- if name == "" {
- ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace))
- commonCmd.ExitOnErr(cmd, "read line error: %w", err)
- ln = strings.ToLower(ln)
- if len(ln) > 0 && (ln[0] == 'n') {
- commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined)
- }
- name = defaultNamespace
- }
- return &control.ChainTarget{
- Name: name,
- Type: control.ChainTarget_NAMESPACE,
- }
- case containerTarget:
- var cnr cid.ID
- commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
- return &control.ChainTarget{
- Name: name,
- Type: control.ChainTarget_CONTAINER,
- }
- case userTarget:
- return &control.ChainTarget{
- Name: name,
- Type: control.ChainTarget_USER,
- }
- case groupTarget:
- return &control.ChainTarget{
- Name: name,
- Type: control.ChainTarget_GROUP,
- }
- default:
- commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
+ target := apeCmd.ParseTarget(cmd)
+
+ typ, ok := engineToControlSvcType[target.Type]
+ if !ok {
+ commonCmd.ExitOnErr(cmd, "%w", fmt.Errorf("unknown type '%c", target.Type))
+ }
+
+ return &control.ChainTarget{
+ Name: target.Name,
+ Type: typ,
}
- return nil
}
func listRules(cmd *cobra.Command, _ []string) {
@@ -117,7 +75,7 @@ func listRules(cmd *cobra.Command, _ []string) {
for _, c := range chains {
var chain apechain.Chain
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(c))
- util.PrintHumanReadableAPEChain(cmd, &chain)
+ apeCmd.PrintHumanReadableAPEChain(cmd, &chain)
}
}
@@ -125,7 +83,7 @@ func initControlListRulesCmd() {
initControlFlags(listRulesCmd)
ff := listRulesCmd.Flags()
- ff.String(targetNameFlag, "", targetNameDesc)
- ff.String(targetTypeFlag, "", targetTypeDesc)
- _ = listRulesCmd.MarkFlagRequired(targetTypeFlag)
+ ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
+ _ = listRulesCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go
index 6a988c355..3142d02e7 100644
--- a/cmd/frostfs-cli/modules/control/list_targets.go
+++ b/cmd/frostfs-cli/modules/control/list_targets.go
@@ -2,26 +2,20 @@ package control
import (
"bytes"
- "crypto/sha256"
"fmt"
"strconv"
"text/tabwriter"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
-const (
- chainNameFlag = "chain-name"
- chainNameFlagUsage = "Chain name(ingress|s3)"
-)
-
var listTargetsCmd = &cobra.Command{
Use: "list-targets",
Short: "List local targets",
@@ -32,15 +26,11 @@ var listTargetsCmd = &cobra.Command{
func listTargets(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
- var cnr cid.ID
- chainName, _ := cmd.Flags().GetString(chainNameFlag)
-
- rawCID := make([]byte, sha256.Size)
- cnr.Encode(rawCID)
+ chainName := apeCmd.ParseChainName(cmd)
req := &control.ListTargetsLocalOverridesRequest{
Body: &control.ListTargetsLocalOverridesRequest_Body{
- ChainName: chainName,
+ ChainName: string(chainName),
},
}
@@ -72,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write([]byte("#\tName\tType\n"))
for i, t := range targets {
- _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
+ _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
}
_ = tw.Flush()
cmd.Print(buf.String())
@@ -82,7 +72,7 @@ func initControlListTargetsCmd() {
initControlFlags(listTargetsCmd)
ff := listTargetsCmd.Flags()
- ff.String(chainNameFlag, "", chainNameFlagUsage)
+ ff.String(apeCmd.ChainNameFlag, "", apeCmd.ChainNameFlagDesc)
- _ = cobra.MarkFlagRequired(ff, chainNameFlag)
+ _ = cobra.MarkFlagRequired(ff, apeCmd.ChainNameFlag)
}
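
In the same spirit, the tabwriter line above uses fmt.Appendf (available since Go 1.19), which formats directly into a byte slice and avoids the []byte(fmt.Sprintf(...)) round trip:

    package main

    import "fmt"

    func main() {
        // Equivalent to []byte(fmt.Sprintf(...)) without the string copy.
        row := fmt.Appendf(nil, "%s\t%s\t%s\n", "0", "name", "type")
        fmt.Print(string(row))
    }
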
diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go
new file mode 100644
index 000000000..4cb4be539
--- /dev/null
+++ b/cmd/frostfs-cli/modules/control/locate.go
@@ -0,0 +1,117 @@
+package control
+
+import (
+ "bytes"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+ "github.com/spf13/cobra"
+)
+
+const (
+ FullInfoFlag = "full"
+ FullInfoFlagUsage = "Print full ShardInfo."
+)
+
+var locateObjectCmd = &cobra.Command{
+ Use: "locate-object",
+ Short: "List shards storing the object",
+ Long: "List shards storing the object",
+ Run: locateObject,
+}
+
+func initControlLocateObjectCmd() {
+ initControlFlags(locateObjectCmd)
+
+ flags := locateObjectCmd.Flags()
+
+ flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
+ _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
+
+ flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
+ _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
+
+ flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
+ flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
+}
+
+func locateObject(cmd *cobra.Command, _ []string) {
+ var cnr cid.ID
+ var obj oid.ID
+
+ _ = object.ReadObjectAddress(cmd, &cnr, &obj)
+
+ pk := key.Get(cmd)
+
+ body := new(control.ListShardsForObjectRequest_Body)
+ body.SetContainerId(cnr.EncodeToString())
+ body.SetObjectId(obj.EncodeToString())
+ req := new(control.ListShardsForObjectRequest)
+ req.SetBody(body)
+ signRequest(cmd, pk, req)
+
+ cli := getClient(cmd, pk)
+
+ var err error
+ var resp *control.ListShardsForObjectResponse
+ err = cli.ExecRaw(func(client *rawclient.Client) error {
+ resp, err = control.ListShardsForObject(client, req)
+ return err
+ })
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+ shardIDs := resp.GetBody().GetShard_ID()
+
+ isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
+ if !isFull {
+ for _, id := range shardIDs {
+ cmd.Println(base58.Encode(id))
+ }
+ return
+ }
+
+ // get full shard info
+ listShardsReq := new(control.ListShardsRequest)
+ listShardsReq.SetBody(new(control.ListShardsRequest_Body))
+ signRequest(cmd, pk, listShardsReq)
+ var listShardsResp *control.ListShardsResponse
+ err = cli.ExecRaw(func(client *rawclient.Client) error {
+ listShardsResp, err = control.ListShards(client, listShardsReq)
+ return err
+ })
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
+
+ shards := listShardsResp.GetBody().GetShards()
+ sortShardsByID(shards)
+ shards = filterShards(shards, shardIDs)
+
+ isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
+ if isJSON {
+ prettyPrintShardsJSON(cmd, shards)
+ } else {
+ prettyPrintShards(cmd, shards)
+ }
+}
+
+func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
+ var res []control.ShardInfo
+ for _, id := range ids {
+ for _, inf := range info {
+ if bytes.Equal(inf.Shard_ID, id) {
+ res = append(res, inf)
+ }
+ }
+ }
+ return res
+}
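
Given the flags registered in initControlLocateObjectCmd, the new command should be invocable roughly as below; the --endpoint/--wallet spellings come from the shared control flags and are an assumption here, not part of this patch:

    # Shard IDs only, base58-encoded, one per line:
    frostfs-cli control locate-object --endpoint <control-endpoint> \
        --wallet <wallet> --cid <container-id> --oid <object-id>

    # Full ShardInfo for the matching shards, JSON-encoded:
    frostfs-cli control locate-object --endpoint <control-endpoint> \
        --wallet <wallet> --cid <container-id> --oid <object-id> --full --json
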
diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go
index e2b408712..3df12a15d 100644
--- a/cmd/frostfs-cli/modules/control/rebuild_shards.go
+++ b/cmd/frostfs-cli/modules/control/rebuild_shards.go
@@ -3,10 +3,10 @@ package control
import (
"fmt"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/remove_rule.go b/cmd/frostfs-cli/modules/control/remove_rule.go
index 4189ea76b..036317bcb 100644
--- a/cmd/frostfs-cli/modules/control/remove_rule.go
+++ b/cmd/frostfs-cli/modules/control/remove_rule.go
@@ -4,19 +4,14 @@ import (
"encoding/hex"
"errors"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
-const (
- chainIDFlag = "chain-id"
- chainIDHexFlag = "chain-id-hex"
- allFlag = "all"
-)
-
var (
errEmptyChainID = errors.New("chain id cannot be empty")
@@ -30,8 +25,8 @@ var (
func removeRule(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
- hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
- removeAll, _ := cmd.Flags().GetBool(allFlag)
+ hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag)
+ removeAll, _ := cmd.Flags().GetBool(apecmd.AllFlag)
if removeAll {
req := &control.RemoveChainLocalOverridesByTargetRequest{
Body: &control.RemoveChainLocalOverridesByTargetRequest_Body{
@@ -52,7 +47,7 @@ func removeRule(cmd *cobra.Command, _ []string) {
return
}
- chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag)
if chainID == "" {
commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID)
}
@@ -92,11 +87,11 @@ func initControlRemoveRuleCmd() {
initControlFlags(removeRuleCmd)
ff := removeRuleCmd.Flags()
- ff.String(targetNameFlag, "", targetNameDesc)
- ff.String(targetTypeFlag, "", targetTypeDesc)
- _ = removeRuleCmd.MarkFlagRequired(targetTypeFlag)
- ff.String(chainIDFlag, "", "Chain id")
- ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
- ff.Bool(allFlag, false, "Remove all chains")
- removeRuleCmd.MarkFlagsMutuallyExclusive(allFlag, chainIDFlag)
+ ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc)
+ ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc)
+ _ = removeRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag)
+ ff.String(apecmd.ChainIDFlag, "", apecmd.ChainIDFlagDesc)
+ ff.Bool(apecmd.ChainIDHexFlag, false, apecmd.ChainIDHexFlagDesc)
+ ff.Bool(apecmd.AllFlag, false, "Remove all chains")
+ removeRuleCmd.MarkFlagsMutuallyExclusive(apecmd.AllFlag, apecmd.ChainIDFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go
index b20d3618e..3abfe80cb 100644
--- a/cmd/frostfs-cli/modules/control/root.go
+++ b/cmd/frostfs-cli/modules/control/root.go
@@ -39,6 +39,7 @@ func init() {
listRulesCmd,
getRuleCmd,
listTargetsCmd,
+ locateObjectCmd,
)
initControlHealthCheckCmd()
@@ -52,4 +53,5 @@ func init() {
initControlListRulesCmd()
initControGetRuleCmd()
initControlListTargetsCmd()
+ initControlLocateObjectCmd()
}
diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go
index a107b2b53..26a1ba883 100644
--- a/cmd/frostfs-cli/modules/control/set_netmap_status.go
+++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go
@@ -6,12 +6,12 @@ import (
"fmt"
"time"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
@@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
var resp *control.GetNetmapStatusResponse
var err error
err = cli.ExecRaw(func(client *rawclient.Client) error {
- resp, err = control.GetNetmapStatus(client, req)
+ resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
return err
})
commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index 329cb9100..3483f5d62 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -13,7 +13,6 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
- shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
@@ -23,7 +22,6 @@ func initControlShardsCmd() {
initControlShardsListCmd()
initControlSetShardModeCmd()
- initControlEvacuateShardCmd()
initControlEvacuationShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()
diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go
index a81034a9e..40d6628ee 100644
--- a/cmd/frostfs-cli/modules/control/shards_list.go
+++ b/cmd/frostfs-cli/modules/control/shards_list.go
@@ -7,11 +7,11 @@ import (
"sort"
"strings"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go
index dd0d77748..8fe01ba30 100644
--- a/cmd/frostfs-cli/modules/control/shards_set_mode.go
+++ b/cmd/frostfs-cli/modules/control/shards_set_mode.go
@@ -6,10 +6,10 @@ import (
"slices"
"strings"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/synchronize_tree.go b/cmd/frostfs-cli/modules/control/synchronize_tree.go
index 5f2e4da96..1e4575f49 100644
--- a/cmd/frostfs-cli/modules/control/synchronize_tree.go
+++ b/cmd/frostfs-cli/modules/control/synchronize_tree.go
@@ -4,12 +4,12 @@ import (
"crypto/sha256"
"errors"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/util.go b/cmd/frostfs-cli/modules/control/util.go
index ef547681f..41d9dbf8a 100644
--- a/cmd/frostfs-cli/modules/control/util.go
+++ b/cmd/frostfs-cli/modules/control/util.go
@@ -4,11 +4,11 @@ import (
"crypto/ecdsa"
"errors"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"github.com/spf13/cobra"
diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go
index ffe9009ab..d0c9a641b 100644
--- a/cmd/frostfs-cli/modules/control/writecache.go
+++ b/cmd/frostfs-cli/modules/control/writecache.go
@@ -1,10 +1,10 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
@@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
var sealWritecacheShardCmd = &cobra.Command{
Use: "seal",
Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
- Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
+ Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
Run: sealWritecache,
}
diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
index b6ec48f35..5da66dcd9 100644
--- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go
+++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
@@ -49,24 +49,24 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
var stateWord string
- switch {
+ switch i.Status() {
default:
stateWord = ""
- case i.IsOnline():
+ case netmap.Online:
stateWord = "online"
- case i.IsOffline():
+ case netmap.Offline:
stateWord = "offline"
- case i.IsMaintenance():
+ case netmap.Maintenance:
stateWord = "maintenance"
}
cmd.Println("state:", stateWord)
- netmap.IterateNetworkEndpoints(i, func(s string) {
+ for s := range i.NetworkEndpoints() {
cmd.Println("address:", s)
- })
+ }
- i.IterateAttributes(func(key, value string) {
+ for key, value := range i.Attributes() {
cmd.Printf("attribute: %s=%s\n", key, value)
- })
+ }
}
diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go
index e4e9cddb8..08a9ac4c8 100644
--- a/cmd/frostfs-cli/modules/object/delete.go
+++ b/cmd/frostfs-cli/modules/object/delete.go
@@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
}
- objAddr = readObjectAddress(cmd, &cnr, &obj)
+ objAddr = ReadObjectAddress(cmd, &cnr, &obj)
}
pk := key.GetOrGenerate(cmd)
diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go
index f1edccba2..7312f5384 100644
--- a/cmd/frostfs-cli/modules/object/get.go
+++ b/cmd/frostfs-cli/modules/object/get.go
@@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := readObjectAddress(cmd, &cnr, &obj)
+ objAddr := ReadObjectAddress(cmd, &cnr, &obj)
filename := cmd.Flag(fileFlag).Value.String()
out, closer := createOutWriter(cmd, filename)
diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go
index 26243e7e7..25df375d4 100644
--- a/cmd/frostfs-cli/modules/object/hash.go
+++ b/cmd/frostfs-cli/modules/object/hash.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
@@ -42,7 +41,9 @@ func initObjectHashCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
+ flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
+ _ = objectHashCmd.MarkFlagRequired("range")
+
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
}
@@ -51,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := readObjectAddress(cmd, &cnr, &obj)
+ objAddr := ReadObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -66,36 +67,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
- tz := typ == hashTz
- fullHash := len(ranges) == 0
- if fullHash {
- var headPrm internalclient.HeadObjectPrm
- headPrm.SetClient(cli)
- Prepare(cmd, &headPrm)
- headPrm.SetAddress(objAddr)
-
- // get hash of full payload through HEAD (may be user can do it through dedicated command?)
- res, err := internalclient.HeadObject(cmd.Context(), headPrm)
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- var cs checksum.Checksum
- var csSet bool
-
- if tz {
- cs, csSet = res.Header().PayloadHomomorphicHash()
- } else {
- cs, csSet = res.Header().PayloadChecksum()
- }
-
- if csSet {
- cmd.Println(hex.EncodeToString(cs.Value()))
- } else {
- cmd.Println("Missing checksum in object header.")
- }
-
- return
- }
-
var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm)
@@ -104,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
hashPrm.SetSalt(salt)
hashPrm.SetRanges(ranges)
- if tz {
+ if typ == hashTz {
hashPrm.TZ()
}
diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go
index 14797dc41..97e996cad 100644
--- a/cmd/frostfs-cli/modules/object/head.go
+++ b/cmd/frostfs-cli/modules/object/head.go
@@ -6,12 +6,12 @@ import (
"fmt"
"os"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -38,7 +38,6 @@ func initObjectHeadCmd() {
_ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(fileFlag, "", "File to write header to. Default: stdout.")
- flags.Bool("main-only", false, "Return only main fields")
flags.Bool(commonflags.JSON, false, "Marshal output in JSON")
flags.Bool("proto", false, "Marshal output in Protobuf")
flags.Bool(rawFlag, false, rawFlagDesc)
@@ -48,8 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := readObjectAddress(cmd, &cnr, &obj)
- mainOnly, _ := cmd.Flags().GetBool("main-only")
+ objAddr := ReadObjectAddress(cmd, &cnr, &obj)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -62,7 +60,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
raw, _ := cmd.Flags().GetBool(rawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(objAddr)
- prm.SetMainOnlyFlag(mainOnly)
res, err := internalclient.HeadObject(cmd.Context(), prm)
if err != nil {
diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go
index d2e9af24c..d67db9f0d 100644
--- a/cmd/frostfs-cli/modules/object/lock.go
+++ b/cmd/frostfs-cli/modules/object/lock.go
@@ -7,17 +7,18 @@ import (
"strconv"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
+ "github.com/spf13/viper"
)
// object lock command.
@@ -78,7 +79,7 @@ var objectLockCmd = &cobra.Command{
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
- endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
+ endpoint := viper.GetString(commonflags.RPC)
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err)
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 0eac4e6d2..476238651 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -1,8 +1,6 @@
package object
import (
- "bytes"
- "cmp"
"context"
"crypto/ecdsa"
"encoding/hex"
@@ -51,6 +49,12 @@ type ecHeader struct {
parent oid.ID
}
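+// objectCounter tracks how many data objects hold the payload; for EC objects
+// the total is multiplied once by the chunk count reported in the EC header.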
+type objectCounter struct {
+ sync.Mutex
+ total uint32
+ isECcounted bool
+}
+
type objectPlacement struct {
requiredNodes []netmapSDK.NodeInfo
confirmedNodes []netmapSDK.NodeInfo
@@ -59,6 +63,7 @@ type objectPlacement struct {
type objectNodesResult struct {
errors []error
placements map[oid.ID]objectPlacement
+ total uint32
}
type ObjNodesDataObject struct {
@@ -104,23 +109,23 @@ func initObjectNodesCmd() {
func objectNodes(cmd *cobra.Command, _ []string) {
var cnrID cid.ID
var objID oid.ID
- readObjectAddress(cmd, &cnrID, &objID)
+ ReadObjectAddress(cmd, &cnrID, &objID)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
- objects := getPhyObjects(cmd, cnrID, objID, cli, pk)
+ objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk)
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
- getActualPlacement(cmd, netmap, pk, objects, result)
+ getActualPlacement(cmd, netmap, pk, objects, count, result)
printPlacement(cmd, objID, objects, result)
}
-func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject {
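+// getPhyObjects returns the physical objects to inspect together with the number
+// of data objects: 1 for simple and EC objects, the member count for complex ones.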
+func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@@ -148,7 +153,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
parent: res.Header().ECHeader().Parent(),
}
}
- return []phyObject{obj}
+ return []phyObject{obj}, 1
}
var errSplitInfo *objectSDK.SplitInfoError
@@ -158,29 +163,34 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
- return getECObjectChunks(cmd, cnrID, objID, ecInfoError)
+ return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1
}
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
- return nil
+ return nil, 0
}
-func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject {
- members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
- return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead)
+func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) {
+ members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
+ return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total
}
-func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
+func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) {
+ var total int
splitInfo := errSplitInfo.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
- return members
+ if total = len(members); total > 0 {
+ total-- // the linking object is not a data object
+ }
+ return members, total
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
- return members
+ return members, len(members)
}
- return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
+ members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
+ return members, len(members)
}
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
@@ -195,7 +205,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
prmHead.SetRawFlag(true) // to get an error instead of whole object
eg, egCtx := errgroup.WithContext(cmd.Context())
- for idx := range len(members) {
+ for idx := range members {
partObjID := members[idx]
eg.Go(func() error {
@@ -323,7 +333,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
}
placementBuilder := placement.NewNetworkMapBuilder(netmap)
for _, object := range objects {
- placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
+ placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
for repIdx, rep := range placement {
numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
@@ -361,7 +371,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
placementObjectID = object.ecHeader.parent
}
placementBuilder := placement.NewNetworkMapBuilder(netmap)
- placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
+ placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
for _, vector := range placement {
@@ -386,8 +396,11 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
}
}
-func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) {
+func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) {
resultMtx := &sync.Mutex{}
+ counter := &objectCounter{
+ total: uint32(count),
+ }
candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
@@ -404,7 +417,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
for _, object := range objects {
eg.Go(func() error {
- stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
+ stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter)
resultMtx.Lock()
defer resultMtx.Unlock()
if err == nil && stored {
@@ -423,6 +436,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
+ result.total = counter.total
}
func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
@@ -447,17 +461,11 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
var cli *client.Client
var addresses []string
if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal {
- candidate.IterateNetworkEndpoints(func(s string) bool {
- addresses = append(addresses, s)
- return false
- })
+ addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())
addresses = append(addresses, candidate.ExternalAddresses()...)
} else {
addresses = append(addresses, candidate.ExternalAddresses()...)
- candidate.IterateNetworkEndpoints(func(s string) bool {
- addresses = append(addresses, s)
- return false
- })
+ addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())
}
var lastErr error
@@ -481,7 +489,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
return cli, nil
}
-func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
+func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@@ -496,6 +504,14 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
res, err := internalclient.HeadObject(ctx, prmHead)
if err == nil && res != nil {
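+ // An EC header reveals how many chunks the parent was split into;
+ // scale the total once, guarded by isECcounted.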
+ if res.Header().ECHeader() != nil {
+ counter.Lock()
+ defer counter.Unlock()
+ if !counter.isECcounted {
+ counter.total *= res.Header().ECHeader().Total()
+ }
+ counter.isECcounted = true
+ }
return true, nil
}
var notFound *apistatus.ObjectNotFound
@@ -507,7 +523,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
}
func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
- normilizeObjectNodesResult(objects, result)
if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
printObjectNodesAsJSON(cmd, objID, objects, result)
} else {
@@ -515,36 +530,9 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
}
}
-func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
- slices.SortFunc(objects, func(lhs, rhs phyObject) int {
- if lhs.ecHeader == nil && rhs.ecHeader == nil {
- return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
- }
- if lhs.ecHeader == nil {
- return -1
- }
- if rhs.ecHeader == nil {
- return 1
- }
- if lhs.ecHeader.parent == rhs.ecHeader.parent {
- return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
- }
- return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
- })
- for _, obj := range objects {
- op := result.placements[obj.objectID]
- slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
- return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
- })
- slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
- return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
- })
- result.placements[obj.objectID] = op
- }
-}
-
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
- fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
+ fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total)
+ fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
for _, object := range objects {
fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go
index 8f03885ab..ebbde76a2 100644
--- a/cmd/frostfs-cli/modules/object/patch.go
+++ b/cmd/frostfs-cli/modules/object/patch.go
@@ -2,6 +2,7 @@ package object
import (
"fmt"
+ "os"
"strconv"
"strings"
@@ -9,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -20,6 +22,7 @@ const (
replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range"
payloadFlagName = "payload"
+ splitHeaderFlagName = "split-header"
)
var objectPatchCmd = &cobra.Command{
@@ -46,17 +49,18 @@ func initObjectPatchCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
+ flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
+ flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
}
func patch(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := readObjectAddress(cmd, &cnr, &obj)
+ objAddr := ReadObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeSlice(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -84,6 +88,8 @@ func patch(cmd *cobra.Command, _ []string) {
prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs
+ prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
+
for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i],
@@ -99,11 +105,9 @@ func patch(cmd *cobra.Command, _ []string) {
}
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
- var rawAttrs []string
-
- raw := cmd.Flag(newAttrsFlagName).Value.String()
- if len(raw) != 0 {
- rawAttrs = strings.Split(raw, ",")
+ rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
+ if err != nil {
+ return nil, err
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
@@ -149,3 +153,22 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v
}
+
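+// parseSplitHeaderBinaryOrJSON reads the split header file passed via
+// --split-header, trying the binary encoding first and falling back to JSON.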
+func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
+ path, _ := cmd.Flags().GetString(splitHeaderFlagName)
+ if path == "" {
+ return nil
+ }
+
+ data, err := os.ReadFile(path)
+ commonCmd.ExitOnErr(cmd, "read file error: %w", err)
+
+ splitHdrV2 := new(objectV2.SplitHeader)
+ err = splitHdrV2.Unmarshal(data)
+ if err != nil {
+ err = splitHdrV2.UnmarshalJSON(data)
+ commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
+ }
+
+ return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
+}
diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go
index 45e02edb3..9e8a7cc6f 100644
--- a/cmd/frostfs-cli/modules/object/put.go
+++ b/cmd/frostfs-cli/modules/object/put.go
@@ -10,11 +10,11 @@ import (
"strings"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -50,7 +50,7 @@ func initObjectPutCmd() {
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
+ flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@@ -214,11 +214,9 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
}
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
- var rawAttrs []string
-
- raw := cmd.Flag("attributes").Value.String()
- if len(raw) != 0 {
- rawAttrs = strings.Split(raw, ",")
+ rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
+ if err != nil {
+ return nil, err
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go
index ad4bc3d59..6ec508ae2 100644
--- a/cmd/frostfs-cli/modules/object/range.go
+++ b/cmd/frostfs-cli/modules/object/range.go
@@ -38,7 +38,7 @@ func initObjectRangeCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.String("range", "", "Range to take data from in the form offset:length")
+ flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc)
}
@@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := readObjectAddress(cmd, &cnr, &obj)
+ objAddr := ReadObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool {
if ok {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
- if !(toJSON || toProto) {
+ if !toJSON && !toProto {
cmd.PrintErrln("Object is erasure-encoded, ec information received.")
}
printECInfo(cmd, errECInfo.ECInfo())
@@ -195,11 +195,10 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
}
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
- v := cmd.Flag("range").Value.String()
- if len(v) == 0 {
- return nil, nil
+ vs, err := cmd.Flags().GetStringSlice("range")
+ if len(vs) == 0 || err != nil {
+ return nil, err
}
- vs := strings.Split(v, ",")
rs := make([]objectSDK.Range, len(vs))
for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep)
diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go
index b090c9f8c..8e4e8b287 100644
--- a/cmd/frostfs-cli/modules/object/util.go
+++ b/cmd/frostfs-cli/modules/object/util.go
@@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
return xs
}
-func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
+func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readOID(cmd, obj)
@@ -262,13 +262,8 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client
if _, ok := dst.(*internal.DeleteObjectPrm); ok {
common.PrintVerbose(cmd, "Collecting relatives of the removal object...")
- rels := collectObjectRelatives(cmd, cli, cnr, *obj)
-
- if len(rels) == 0 {
- objs = []oid.ID{*obj}
- } else {
- objs = append(rels, *obj)
- }
+ objs = collectObjectRelatives(cmd, cli, cnr, *obj)
+ objs = append(objs, *obj)
}
}
diff --git a/cmd/frostfs-cli/modules/root.go b/cmd/frostfs-cli/modules/root.go
index 21c367d29..88acab341 100644
--- a/cmd/frostfs-cli/modules/root.go
+++ b/cmd/frostfs-cli/modules/root.go
@@ -21,7 +21,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc"
- "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -112,14 +111,16 @@ func initConfig() {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
- // Find home directory.
- home, err := homedir.Dir()
- commonCmd.ExitOnErr(rootCmd, "", err)
-
- // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml"
- viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
- viper.SetConfigName("config")
- viper.SetConfigType("yaml")
+ // Find config directory.
+ configDir, err := os.UserConfigDir()
+ if err != nil {
+ common.PrintVerbose(rootCmd, "Get config dir: %s", err)
+ } else {
+ // Search config in `$XDG_CONFIG_HOME/frostfs-cli/` with name "config.yaml"
+ viper.AddConfigPath(filepath.Join(configDir, "frostfs-cli"))
+ viper.SetConfigName("config")
+ viper.SetConfigType("yaml")
+ }
}
viper.SetEnvPrefix(envPrefix)
diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go
index 019feb0ec..e2c05d486 100644
--- a/cmd/frostfs-cli/modules/tree/add.go
+++ b/cmd/frostfs-cli/modules/tree/add.go
@@ -30,8 +30,6 @@ func initAddCmd() {
ff := addCmd.Flags()
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
ff.Uint64(parentIDFlagKey, 0, "Parent node ID")
-
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func add(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go
index 5d5b00b7d..7263bcd0d 100644
--- a/cmd/frostfs-cli/modules/tree/add_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/add_by_path.go
@@ -36,7 +36,6 @@ func initAddByPathCmd() {
ff.String(pathFlagKey, "", "Path to a node")
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
_ = cobra.MarkFlagRequired(ff, pathFlagKey)
}
diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go
index 4e0099f02..d71a94b98 100644
--- a/cmd/frostfs-cli/modules/tree/client.go
+++ b/cmd/frostfs-cli/modules/tree/client.go
@@ -2,17 +2,19 @@ package tree
import (
"context"
- "strings"
+ "crypto/tls"
+ "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -20,27 +22,40 @@ import (
// after making Tree API public.
func _client() (tree.TreeServiceClient, error) {
var netAddr network.Address
- err := netAddr.FromString(viper.GetString(commonflags.RPC))
+
+ rpcEndpoint := viper.GetString(commonflags.RPC)
+ if rpcEndpoint == "" {
+ return nil, fmt.Errorf("%s is not defined", commonflags.RPC)
+ }
+
+ err := netAddr.FromString(rpcEndpoint)
if err != nil {
return nil, err
}
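+ // Derive transport security from the URI scheme: TLS for grpcs endpoints,
+ // insecure credentials otherwise.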
+ host, isTLS, err := client.ParseURI(netAddr.URIAddr())
+ if err != nil {
+ return nil, err
+ }
+
+ creds := insecure.NewCredentials()
+ if isTLS {
+ creds = credentials.NewTLS(&tls.Config{})
+ }
+
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInteceptor(),
+ tracing.NewUnaryClientInterceptor(),
),
grpc.WithChainStreamInterceptor(
- metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
),
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
+ grpc.WithDisableServiceConfig(),
+ grpc.WithTransportCredentials(creds),
}
- if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
- opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }
-
- cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
+ cc, err := grpc.NewClient(host, opts...)
return tree.NewTreeServiceClient(cc), err
}
diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go
index 7061723fd..210630e60 100644
--- a/cmd/frostfs-cli/modules/tree/get_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/get_by_path.go
@@ -36,8 +36,6 @@ func initGetByPathCmd() {
ff.String(pathFlagKey, "", "Path to a node")
ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node")
-
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getByPath(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go
index 376aa8e8d..9d767ab3e 100644
--- a/cmd/frostfs-cli/modules/tree/get_op_log.go
+++ b/cmd/frostfs-cli/modules/tree/get_op_log.go
@@ -30,8 +30,6 @@ func initGetOpLogCmd() {
ff := getOpLogCmd.Flags()
ff.Uint64(heightFlagKey, 0, "Height to start with")
ff.Uint64(countFlagKey, 10, "Logged operations count")
-
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getOpLog(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go
index b01bb2e77..c581b8e26 100644
--- a/cmd/frostfs-cli/modules/tree/healthcheck.go
+++ b/cmd/frostfs-cli/modules/tree/healthcheck.go
@@ -20,8 +20,6 @@ var healthcheckCmd = &cobra.Command{
func initHealthcheckCmd() {
commonflags.Init(healthcheckCmd)
- ff := healthcheckCmd.Flags()
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func healthcheck(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go
index f8c0e490f..ee1db2a79 100644
--- a/cmd/frostfs-cli/modules/tree/list.go
+++ b/cmd/frostfs-cli/modules/tree/list.go
@@ -26,8 +26,6 @@ func initListCmd() {
ff := listCmd.Flags()
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = listCmd.MarkFlagRequired(commonflags.CIDFlag)
-
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func list(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go
index dc807d752..7a369bd02 100644
--- a/cmd/frostfs-cli/modules/tree/move.go
+++ b/cmd/frostfs-cli/modules/tree/move.go
@@ -33,8 +33,6 @@ func initMoveCmd() {
_ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
_ = getSubtreeCmd.MarkFlagRequired(parentIDFlagKey)
-
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func move(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go
index d0b6fab2f..3c532fe26 100644
--- a/cmd/frostfs-cli/modules/tree/remove.go
+++ b/cmd/frostfs-cli/modules/tree/remove.go
@@ -29,8 +29,6 @@ func initRemoveCmd() {
ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
_ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
-
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func remove(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go
index 83a8909b6..c5f7ad401 100644
--- a/cmd/frostfs-cli/modules/tree/subtree.go
+++ b/cmd/frostfs-cli/modules/tree/subtree.go
@@ -34,8 +34,6 @@ func initGetSubtreeCmd() {
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
-
- _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getSubTree(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/util/convert_eacl.go b/cmd/frostfs-cli/modules/util/convert_eacl.go
index d588ba35d..caa6dfcfe 100644
--- a/cmd/frostfs-cli/modules/util/convert_eacl.go
+++ b/cmd/frostfs-cli/modules/util/convert_eacl.go
@@ -6,7 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
+ apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 137e764ed..13a747ba6 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -1,13 +1,17 @@
package main
import (
+ "context"
"os"
"os/signal"
+ "strconv"
"syscall"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "github.com/spf13/cast"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -37,16 +41,36 @@ func reloadConfig() error {
}
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
audit.Store(cfg.GetBool("audit.enabled"))
+ var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err
}
- logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+ err = logPrm.SetTags(loggerTags())
+ if err != nil {
+ return err
+ }
+ logger.UpdateLevelForTags(logPrm)
- return logPrm.Reload()
+ return nil
}
-func watchForSignal(cancel func()) {
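+// loggerTags collects logger.tags.<N>.names/level pairs from the config,
+// stopping at the first index without names.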
+func loggerTags() [][]string {
+ var res [][]string
+ for i := 0; ; i++ {
+ var item []string
+ index := strconv.FormatInt(int64(i), 10)
+ names := cast.ToString(cfg.Get("logger.tags." + index + ".names"))
+ if names == "" {
+ break
+ }
+ item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level")))
+ res = append(res, item)
+ }
+ return res
+}
+
+func watchForSignal(ctx context.Context, cancel func()) {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
@@ -58,49 +82,49 @@ func watchForSignal(cancel func()) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
- log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
- shutdown()
- log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ shutdown(ctx)
+ log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
- log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+ log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
- shutdown()
+ shutdown(ctx)
return
default:
// block until any signal is received
select {
case <-ch:
- log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
- shutdown()
- log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ shutdown(ctx)
+ log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
- log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+ log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
- shutdown()
+ shutdown(ctx)
return
case <-sighupCh:
- log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
- if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
- log.Info(logs.FrostFSNodeSIGHUPSkip)
+ log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+ if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
+ log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
break
}
err := reloadConfig()
if err != nil {
- log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
- pprofCmp.reload()
- metricsCmp.reload()
- log.Info(logs.FrostFSIRReloadExtraWallets)
+ pprofCmp.reload(ctx)
+ metricsCmp.reload(ctx)
+ log.Info(ctx, logs.FrostFSIRReloadExtraWallets)
err = innerRing.SetExtraWallets(cfg)
if err != nil {
- log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
- innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
- log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
+ log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
}
}
diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go
index 899918d22..9b775252f 100644
--- a/cmd/frostfs-ir/defaults.go
+++ b/cmd/frostfs-ir/defaults.go
@@ -48,6 +48,8 @@ func defaultConfiguration(cfg *viper.Viper) {
cfg.SetDefault("node.kludge_compatibility_mode", false)
cfg.SetDefault("audit.enabled", false)
+
+ setMultinetDefaults(cfg)
}
func setControlDefaults(cfg *viper.Viper) {
@@ -131,3 +133,11 @@ func setMorphDefaults(cfg *viper.Viper) {
cfg.SetDefault("morph.validators", []string{})
cfg.SetDefault("morph.switch_interval", 2*time.Minute)
}
+
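+// setMultinetDefaults keeps multinet disabled by default, with an empty
+// balancer, no restriction, a zero fallback delay and no subnets.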
+func setMultinetDefaults(cfg *viper.Viper) {
+ cfg.SetDefault("multinet.enabled", false)
+ cfg.SetDefault("multinet.balancer", "")
+ cfg.SetDefault("multinet.restrict", false)
+ cfg.SetDefault("multinet.fallback_delay", "0s")
+ cfg.SetDefault("multinet.subnets", "")
+}
diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go
index 2792c3548..dd70fc91c 100644
--- a/cmd/frostfs-ir/httpcomponent.go
+++ b/cmd/frostfs-ir/httpcomponent.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"net/http"
"time"
@@ -24,8 +25,8 @@ const (
shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)
-func (c *httpComponent) init() {
- log.Info("init " + c.name)
+func (c *httpComponent) init(ctx context.Context) {
+ log.Info(ctx, "init "+c.name)
c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
c.address = cfg.GetString(c.name + addressKeyPostfix)
c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@@ -39,14 +40,14 @@ func (c *httpComponent) init() {
httputil.WithShutdownTimeout(c.shutdownDur),
)
} else {
- log.Info(c.name + " is disabled, skip")
+ log.Info(ctx, c.name+" is disabled, skip")
c.srv = nil
}
}
-func (c *httpComponent) start() {
+func (c *httpComponent) start(ctx context.Context) {
if c.srv != nil {
- log.Info("start " + c.name)
+ log.Info(ctx, "start "+c.name)
wg.Add(1)
go func() {
defer wg.Done()
@@ -55,10 +56,10 @@ func (c *httpComponent) start() {
}
}
-func (c *httpComponent) shutdown() error {
+func (c *httpComponent) shutdown(ctx context.Context) error {
if c.srv != nil {
- log.Info("shutdown " + c.name)
- return c.srv.Shutdown()
+ log.Info(ctx, "shutdown "+c.name)
+ return c.srv.Shutdown(ctx)
}
return nil
}
@@ -70,17 +71,17 @@ func (c *httpComponent) needReload() bool {
return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}
-func (c *httpComponent) reload() {
- log.Info("reload " + c.name)
+func (c *httpComponent) reload(ctx context.Context) {
+ log.Info(ctx, "reload "+c.name)
if c.needReload() {
- log.Info(c.name + " config updated")
- if err := c.shutdown(); err != nil {
- log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.String("error", err.Error()),
+ log.Info(ctx, c.name+" config updated")
+ if err := c.shutdown(ctx); err != nil {
+ log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.Error(err),
)
} else {
- c.init()
- c.start()
+ c.init(ctx)
+ c.start(ctx)
}
}
}
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 4bc5923a0..799feb784 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -31,7 +31,6 @@ const (
var (
wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors
- logPrm = new(logger.Prm)
innerRing *innerring.Server
pprofCmp *pprofComponent
metricsCmp *httpComponent
@@ -70,6 +69,7 @@ func main() {
metrics := irMetrics.NewInnerRingMetrics()
+ var logPrm logger.Prm
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)
@@ -80,55 +80,59 @@ func main() {
exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+ err = logPrm.SetTags(loggerTags())
+ exitErr(err)
log, err = logger.NewLogger(logPrm)
exitErr(err)
+ logger.UpdateLevelForTags(logPrm)
+
ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent()
- pprofCmp.init()
+ pprofCmp.init(ctx)
metricsCmp = newMetricsComponent()
- metricsCmp.init()
+ metricsCmp.init(ctx)
audit.Store(cfg.GetBool("audit.enabled"))
innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit)
exitErr(err)
- pprofCmp.start()
- metricsCmp.start()
+ pprofCmp.start(ctx)
+ metricsCmp.start(ctx)
// start inner ring
err = innerRing.Start(ctx, intErr)
exitErr(err)
- log.Info(logs.CommonApplicationStarted,
+ log.Info(ctx, logs.CommonApplicationStarted,
zap.String("version", misc.Version))
- watchForSignal(cancel)
+ watchForSignal(ctx, cancel)
<-ctx.Done() // graceful shutdown
- log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
+ log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
wg.Wait()
- log.Info(logs.FrostFSIRApplicationStopped)
+ log.Info(ctx, logs.FrostFSIRApplicationStopped)
}
-func shutdown() {
- innerRing.Stop()
- if err := metricsCmp.shutdown(); err != nil {
- log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.String("error", err.Error()),
+func shutdown(ctx context.Context) {
+ innerRing.Stop(ctx)
+ if err := metricsCmp.shutdown(ctx); err != nil {
+ log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.Error(err),
)
}
- if err := pprofCmp.shutdown(); err != nil {
- log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.String("error", err.Error()),
+ if err := pprofCmp.shutdown(ctx); err != nil {
+ log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.Error(err),
)
}
if err := sdnotify.ClearStatus(); err != nil {
- log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go
index ff5642008..2aebcde7f 100644
--- a/cmd/frostfs-ir/pprof.go
+++ b/cmd/frostfs-ir/pprof.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"runtime"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -28,8 +29,8 @@ func newPprofComponent() *pprofComponent {
}
}
-func (c *pprofComponent) init() {
- c.httpComponent.init()
+func (c *pprofComponent) init(ctx context.Context) {
+ c.httpComponent.init(ctx)
if c.enabled {
c.blockRate = cfg.GetInt(pprofBlockRateKey)
@@ -51,17 +52,17 @@ func (c *pprofComponent) needReload() bool {
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}
-func (c *pprofComponent) reload() {
- log.Info("reload " + c.name)
+func (c *pprofComponent) reload(ctx context.Context) {
+ log.Info(ctx, "reload "+c.name)
if c.needReload() {
- log.Info(c.name + " config updated")
- if err := c.shutdown(); err != nil {
- log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.String("error", err.Error()))
+ log.Info(ctx, c.name+" config updated")
+ if err := c.shutdown(ctx); err != nil {
+ log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.Error(err))
return
}
- c.init()
- c.start()
+ c.init(ctx)
+ c.start(ctx)
}
}
diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
index b1a6e3fd2..e7e2c0769 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
@@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
blz := openBlobovnicza(cmd)
- defer blz.Close()
+ defer blz.Close(cmd.Context())
var prm blobovnicza.GetPrm
prm.SetAddress(addr)
diff --git a/cmd/frostfs-lens/internal/blobovnicza/list.go b/cmd/frostfs-lens/internal/blobovnicza/list.go
index d327dbc41..d41a15bcf 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/list.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/list.go
@@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
}
blz := openBlobovnicza(cmd)
- defer blz.Close()
+ defer blz.Close(cmd.Context())
err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))
diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go
index 9d8ef3dad..2819981d6 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/root.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/root.go
@@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
blobovnicza.WithPath(vPath),
blobovnicza.WithReadOnly(true),
)
- common.ExitOnErr(cmd, blz.Open())
+ common.ExitOnErr(cmd, blz.Open(cmd.Context()))
return blz
}
diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go
index eb4a5ff59..4aa281616 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/tui.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go
index 9eb60f966..f436343c7 100644
--- a/cmd/frostfs-lens/internal/meta/inspect.go
+++ b/cmd/frostfs-lens/internal/meta/inspect.go
@@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
db := openMeta(cmd)
- defer db.Close()
+ defer db.Close(cmd.Context())
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
diff --git a/cmd/frostfs-lens/internal/meta/list-garbage.go b/cmd/frostfs-lens/internal/meta/list-garbage.go
index 61b10ca1f..6b27a232f 100644
--- a/cmd/frostfs-lens/internal/meta/list-garbage.go
+++ b/cmd/frostfs-lens/internal/meta/list-garbage.go
@@ -19,7 +19,7 @@ func init() {
func listGarbageFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
- defer db.Close()
+ defer db.Close(cmd.Context())
var garbPrm meta.GarbageIterationPrm
garbPrm.SetHandler(
diff --git a/cmd/frostfs-lens/internal/meta/list-graveyard.go b/cmd/frostfs-lens/internal/meta/list-graveyard.go
index 19a93691c..45642e74b 100644
--- a/cmd/frostfs-lens/internal/meta/list-graveyard.go
+++ b/cmd/frostfs-lens/internal/meta/list-graveyard.go
@@ -19,7 +19,7 @@ func init() {
func listGraveyardFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
- defer db.Close()
+ defer db.Close(cmd.Context())
var gravePrm meta.GraveyardIterationPrm
gravePrm.SetHandler(
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
index 00e8bf117..7b0e25f3d 100644
--- a/cmd/frostfs-lens/internal/meta/tui.go
+++ b/cmd/frostfs-lens/internal/meta/tui.go
@@ -2,9 +2,12 @@ package meta
import (
"context"
+ "encoding/binary"
+ "errors"
"fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+ schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
@@ -28,6 +31,11 @@ Available search filters:
var initialPrompt string
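+// parserPerSchemaVersion maps a metabase schema version to the parser that
+// understands its bucket layout.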
+var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
+ 2: schema.MetabaseParserV2,
+ 3: schema.MetabaseParserV3,
+}
+
func init() {
common.AddComponentPathFlag(tuiCMD, &vPath)
@@ -44,18 +52,28 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
defer db.Close()
+ schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
+ if !hasVersion {
+ return errors.New("couldn't detect schema version")
+ }
+
+ metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
+ if !ok {
+ return fmt.Errorf("unknown schema version %d", schemaVersion)
+ }
+
// Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
app := tview.NewApplication()
- ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
+ ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
@@ -71,12 +89,30 @@ func runTUI(cmd *cobra.Command) error {
return app.Run()
}
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
+var (
+ shardInfoBucket = []byte{5}
+ versionRecord = []byte("version")
+)
+
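+// lookupSchemaVersion reads the version record from the shard info bucket;
+// ok is false if the bucket or the record is missing.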
+func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
+ err := db.View(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(shardInfoBucket)
+ if bkt == nil {
+ return nil
+ }
+ rec := bkt.Get(versionRecord)
+ if rec == nil {
+ return nil
+ }
+
+ version = binary.LittleEndian.Uint64(rec)
+ ok = true
+
+ return nil
})
if err != nil {
- return nil, err
+ common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
}
- return db, nil
+
+ return
}
diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go
index 0990e24c3..55051554c 100644
--- a/cmd/frostfs-lens/internal/schema/common/raw.go
+++ b/cmd/frostfs-lens/internal/schema/common/raw.go
@@ -7,6 +7,8 @@ import (
)
type RawEntry struct {
+ // key and value used for record dump.
+ // nolint:unused
key, value []byte
}
diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go
index 9bad19032..077a68785 100644
--- a/cmd/frostfs-lens/internal/schema/common/schema.go
+++ b/cmd/frostfs-lens/internal/schema/common/schema.go
@@ -3,6 +3,8 @@ package common
import (
"errors"
"fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
type FilterResult byte
@@ -71,11 +73,7 @@ func (fp FallbackParser) ToParser() Parser {
func (p Parser) ToFallbackParser() FallbackParser {
return func(key, value []byte) (SchemaEntry, Parser) {
entry, next, err := p(key, value)
- if err != nil {
- panic(fmt.Errorf(
- "couldn't use that parser as a fallback parser, it returned an error: %w", err,
- ))
- }
+ assert.NoError(err, "couldn't use that parser as a fallback parser")
return entry, next
}
}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
index 24cc0e52d..4e6bbf08a 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
@@ -80,10 +80,15 @@ var (
},
)
- UserAttributeParser = NewUserAttributeKeyBucketParser(
+ UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
)
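+ // Schema v3 indexes only a fixed set of user attribute keys.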
+ UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
+ NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
+ []string{"FilePath", "S3-Access-Box-CRDT-Name"},
+ )
+
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: StrictResolver,
@@ -108,4 +113,14 @@ var (
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
+
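+ // Schema v3 adds two expiration indexes: expiration epoch to object
+ // and object to expiration epoch.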
+ ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: LenientResolver,
+ })
+
+ ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
index 2fb122940..42a24c594 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
@@ -22,27 +22,31 @@ const (
Split
ContainerCounters
ECInfo
+ ExpirationEpochToObject
+ ObjectToExpirationEpoch
)
var x = map[Prefix]string{
- Graveyard: "Graveyard",
- Garbage: "Garbage",
- ToMoveIt: "To Move It",
- ContainerVolume: "Container Volume",
- Locked: "Locked",
- ShardInfo: "Shard Info",
- Primary: "Primary",
- Lockers: "Lockers",
- Tombstone: "Tombstone",
- Small: "Small",
- Root: "Root",
- Owner: "Owner",
- UserAttribute: "User Attribute",
- PayloadHash: "Payload Hash",
- Parent: "Parent",
- Split: "Split",
- ContainerCounters: "Container Counters",
- ECInfo: "EC Info",
+ Graveyard: "Graveyard",
+ Garbage: "Garbage",
+ ToMoveIt: "To Move It",
+ ContainerVolume: "Container Volume",
+ Locked: "Locked",
+ ShardInfo: "Shard Info",
+ Primary: "Primary",
+ Lockers: "Lockers",
+ Tombstone: "Tombstone",
+ Small: "Small",
+ Root: "Root",
+ Owner: "Owner",
+ UserAttribute: "User Attribute",
+ PayloadHash: "Payload Hash",
+ Parent: "Parent",
+ Split: "Split",
+ ContainerCounters: "Container Counters",
+ ECInfo: "EC Info",
+ ExpirationEpochToObject: "Exp. Epoch to Object",
+ ObjectToExpirationEpoch: "Object to Exp. Epoch",
}
func (p Prefix) String() string {
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
index db90bddbd..62d126f88 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
@@ -9,7 +9,7 @@ import (
func (b *PrefixBucket) String() string {
return common.FormatSimple(
- fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
)
}
@@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
return fmt.Sprintf(
"%s CID %s",
common.FormatSimple(
- fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(b.id.String(), tcell.ColorAqua),
)
@@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
func (b *UserAttributeKeyBucket) String() string {
return fmt.Sprintf("%s CID %s ATTR-KEY %s",
common.FormatSimple(
- fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
index 82b47dd85..7355c3d9e 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
@@ -2,6 +2,7 @@ package buckets
import (
"errors"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -57,10 +58,11 @@ var (
)
var (
- ErrNotBucket = errors.New("not a bucket")
- ErrInvalidKeyLength = errors.New("invalid key length")
- ErrInvalidValueLength = errors.New("invalid value length")
- ErrInvalidPrefix = errors.New("invalid prefix")
+ ErrNotBucket = errors.New("not a bucket")
+ ErrInvalidKeyLength = errors.New("invalid key length")
+ ErrInvalidValueLength = errors.New("invalid value length")
+ ErrInvalidPrefix = errors.New("invalid prefix")
+ ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
)
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@@ -132,6 +134,10 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
}
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
+ return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
+}
+
+func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if value != nil {
return nil, nil, ErrNotBucket
@@ -147,6 +153,11 @@ func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
return nil, nil, err
}
b.key = string(key[33:])
+
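+		// An empty keys list permits any attribute key.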
+ if len(keys) != 0 && !slices.Contains(keys, b.key) {
+ return nil, nil, ErrUnexpectedAttributeKey
+ }
+
return &b, next, nil
}
}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go
index ea095e207..4cc9e8765 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/parser.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go
@@ -5,7 +5,30 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
)
-var MetabaseParser = common.WithFallback(
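+// MetabaseParserV3 covers the v3 schema: it adds the expiration-epoch buckets, restricts user-attribute keys, and drops the Owner and PayloadHash buckets present in v2.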
+var MetabaseParserV3 = common.WithFallback(
+ common.Any(
+ buckets.GraveyardParser,
+ buckets.GarbageParser,
+ buckets.ContainerVolumeParser,
+ buckets.LockedParser,
+ buckets.ShardInfoParser,
+ buckets.PrimaryParser,
+ buckets.LockersParser,
+ buckets.TombstoneParser,
+ buckets.SmallParser,
+ buckets.RootParser,
+ buckets.UserAttributeParserV3,
+ buckets.ParentParser,
+ buckets.SplitParser,
+ buckets.ContainerCountersParser,
+ buckets.ECInfoParser,
+ buckets.ExpirationEpochToObjectParser,
+ buckets.ObjectToExpirationEpochParser,
+ ),
+ common.RawParser.ToFallbackParser(),
+)
+
+var MetabaseParserV2 = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
@@ -18,7 +41,7 @@ var MetabaseParser = common.WithFallback(
buckets.SmallParser,
buckets.RootParser,
buckets.OwnerParser,
- buckets.UserAttributeParser,
+ buckets.UserAttributeParserV2,
buckets.PayloadHashParser,
buckets.ParentParser,
buckets.SplitParser,
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
index 2dda15b4f..477c4fc9d 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
@@ -63,3 +63,11 @@ func (r *ContainerCountersRecord) DetailedString() string {
func (r *ECInfoRecord) DetailedString() string {
return spew.Sdump(*r)
}
+
+func (r *ExpirationEpochToObjectRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ObjectToExpirationEpochRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
index 880a7a8ff..e038911d7 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
@@ -143,3 +143,26 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
return common.No
}
}
+
+func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
index 1b070e2a0..5d846cb75 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
@@ -249,3 +249,45 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
}
return &r, nil, nil
}
+
+func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 72 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+
+ var (
+ r ExpirationEpochToObjectRecord
+ err error
+ )
+
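+	// Key layout: 8-byte big-endian epoch | 32-byte container ID | 32-byte object ID.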
+ r.epoch = binary.BigEndian.Uint64(key[:8])
+ if err = r.cnt.Decode(key[8:40]); err != nil {
+ return nil, nil, err
+ }
+ if err = r.obj.Decode(key[40:]); err != nil {
+ return nil, nil, err
+ }
+
+ return &r, nil, nil
+}
+
+func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ if len(value) != 8 {
+ return nil, nil, ErrInvalidValueLength
+ }
+
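+	// Key is a 32-byte object ID; the value stores the expiration epoch as a little-endian uint64.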
+ var (
+ r ObjectToExpirationEpochRecord
+ err error
+ )
+
+ if err = r.obj.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ r.epoch = binary.LittleEndian.Uint64(value)
+
+ return &r, nil, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
index a6c70d537..f71244625 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
@@ -2,6 +2,7 @@ package records
import (
"fmt"
+ "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"github.com/gdamore/tcell/v2"
@@ -38,7 +39,7 @@ func (r *ContainerVolumeRecord) String() string {
func (r *LockedRecord) String() string {
return fmt.Sprintf(
- "Locker OID %s %c Locked [%d]OID {...}",
+ "Object OID %s %c Lockers [%d]OID {...}",
common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
tview.Borders.Vertical,
len(r.ids),
@@ -133,3 +134,22 @@ func (r *ECInfoRecord) String() string {
len(r.ids),
)
}
+
+func (r *ExpirationEpochToObjectRecord) String() string {
+ return fmt.Sprintf(
+ "exp. epoch %s %c CID %s OID %s",
+ common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
+ )
+}
+
+func (r *ObjectToExpirationEpochRecord) String() string {
+ return fmt.Sprintf(
+ "OID %s %c exp. epoch %s",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
+ )
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
index 34c1c29fd..0809cad1a 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
@@ -79,4 +79,15 @@ type (
id oid.ID
ids []oid.ID
}
+
+ ExpirationEpochToObjectRecord struct {
+ epoch uint64
+ cnt cid.ID
+ obj oid.ID
+ }
+
+ ObjectToExpirationEpochRecord struct {
+ obj oid.ID
+ epoch uint64
+ }
)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
index f50ebe951..d15d69146 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/util.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
@@ -11,7 +11,7 @@ func DecodeOIDs(data []byte) ([]oid.ID, error) {
size := r.ReadVarUint()
oids := make([]oid.ID, size)
- for i := uint64(0); i < size; i++ {
+ for i := range size {
if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
return nil, err
}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
index 7d70b27b2..3bfe2608b 100644
--- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go
+++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
@@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser,
r.addr.SetContainer(cnr)
r.addr.SetObject(obj)
- r.data = value[:]
+ r.data = value
return &r, nil, nil
}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go
index 3f71c5366..11e6f3fcd 100644
--- a/cmd/frostfs-lens/internal/schema/writecache/types.go
+++ b/cmd/frostfs-lens/internal/schema/writecache/types.go
@@ -16,6 +16,8 @@ type (
DefaultRecord struct {
addr oid.Address
+	// data is used for record dumps.
+	//nolint:unused
data []byte
}
)
diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go
index 3f5088e7a..2d3b20792 100644
--- a/cmd/frostfs-lens/internal/tui/buckets.go
+++ b/cmd/frostfs-lens/internal/tui/buckets.go
@@ -124,10 +124,7 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path
parser := parentBucket.NextParser
- buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
- if err != nil {
- return err
- }
+ buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
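+	// LoadBuckets no longer returns an error: failures surface per item via item.err below.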
for item := range buffer {
if item.err != nil {
@@ -135,6 +132,7 @@ func (v *BucketsView) loadNodeChildren(
}
bucket := item.val
+ var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil {
return err
@@ -180,10 +178,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel()
// Check the current bucket's nested buckets if exist
- bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
- if err != nil {
- return false, err
- }
+ bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
for item := range bucketsBuffer {
if item.err != nil {
@@ -191,6 +186,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
b := item.val
+ var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil {
return false, err
@@ -206,10 +202,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
// Check the current bucket's nested records if exist
- recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
- if err != nil {
- return false, err
- }
+ recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
for item := range recordsBuffer {
if item.err != nil {
@@ -217,6 +210,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
r := item.val
+ var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil {
return false, err
diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go
index d0cf611d4..94fa87f98 100644
--- a/cmd/frostfs-lens/internal/tui/db.go
+++ b/cmd/frostfs-lens/internal/tui/db.go
@@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T,
-) (<-chan Item[T], error) {
+) <-chan Item[T] {
buffer := make(chan Item[T], bufferSize)
go func() {
@@ -77,13 +77,13 @@ func load[T any](
}
}()
- return buffer, nil
+ return buffer
}
func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) (<-chan Item[*Bucket], error) {
- buffer, err := load(
+) <-chan Item[*Bucket] {
+ buffer := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value == nil
@@ -98,17 +98,14 @@ func LoadBuckets(
}
},
)
- if err != nil {
- return nil, fmt.Errorf("can't start iterating bucket: %w", err)
- }
- return buffer, nil
+ return buffer
}
func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) (<-chan Item[*Record], error) {
- buffer, err := load(
+) <-chan Item[*Record] {
+ buffer := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value != nil
@@ -124,11 +121,8 @@ func LoadRecords(
}
},
)
- if err != nil {
- return nil, fmt.Errorf("can't start iterating bucket: %w", err)
- }
- return buffer, nil
+ return buffer
}
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
@@ -137,24 +131,21 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- buffer, err := load(
+ buffer := load(
ctx, db, path, 1,
nil,
func(_, value []byte) []byte { return value },
)
- if err != nil {
- return false, err
- }
x, ok := <-buffer
if !ok {
return false, nil
}
if x.err != nil {
- return false, err
+ return false, x.err
}
if x.val != nil {
- return false, err
+ return false, nil
}
return true, nil
}
diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go
index 4fdf97119..471514e5d 100644
--- a/cmd/frostfs-lens/internal/tui/input.go
+++ b/cmd/frostfs-lens/internal/tui/input.go
@@ -1,6 +1,8 @@
package tui
import (
+ "slices"
+
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@@ -26,7 +28,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
// Used history data for search prompt, so just make that data recent.
if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
- f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
+ f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
f.history = append(f.history, s)
}
@@ -51,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
f.historyPointer++
// Stop iterating over history.
if f.historyPointer == len(f.history) {
- f.InputField.SetText(f.currentContent)
+ f.SetText(f.currentContent)
return
}
- f.InputField.SetText(f.history[f.historyPointer])
+ f.SetText(f.history[f.historyPointer])
case tcell.KeyUp:
if len(f.history) == 0 {
return
}
// Start iterating over history.
if f.historyPointer == len(f.history) {
- f.currentContent = f.InputField.GetText()
+ f.currentContent = f.GetText()
}
// End of history.
if f.historyPointer == 0 {
@@ -69,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
}
// Iterate to least recent prompts.
f.historyPointer--
- f.InputField.SetText(f.history[f.historyPointer])
+ f.SetText(f.history[f.historyPointer])
default:
f.InputField.InputHandler()(event, func(tview.Primitive) {})
}
diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go
index 5f53ed287..a4d392ab3 100644
--- a/cmd/frostfs-lens/internal/tui/records.go
+++ b/cmd/frostfs-lens/internal/tui/records.go
@@ -8,6 +8,7 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@@ -62,10 +63,7 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx)
- tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
- if err != nil {
- return err
- }
+ tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() {
@@ -73,11 +71,12 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer {
if item.err != nil {
- v.ui.stopOnError(err)
+ v.ui.stopOnError(item.err)
break
}
record := item.val
+ var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil {
v.ui.stopOnError(err)
@@ -96,9 +95,7 @@ func (v *RecordsView) Mount(ctx context.Context) error {
}
func (v *RecordsView) Unmount() {
- if v.onUnmount == nil {
- panic("try to unmount not mounted component")
- }
+ assert.False(v.onUnmount == nil, "try to unmount not mounted component")
v.onUnmount()
v.onUnmount = nil
}
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
index bcc082821..cc6b7859e 100644
--- a/cmd/frostfs-lens/internal/tui/ui.go
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
return
}
- switch ui.mountedPage.(type) {
+ switch v := ui.mountedPage.(type) {
case *BucketsView:
ui.moveNextPage(NewBucketsView(ui, res))
case *RecordsView:
- bucket := ui.mountedPage.(*RecordsView).bucket
+ bucket := v.bucket
ui.moveNextPage(NewRecordsView(ui, bucket, res))
}
@@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
}
- ui.Box.MouseHandler()
+ ui.MouseHandler()
}
func (ui *UI) WithPrompt(prompt string) error {
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
index d4e13b2a9..2d1ab3e33 100644
--- a/cmd/frostfs-lens/internal/tui/util.go
+++ b/cmd/frostfs-lens/internal/tui/util.go
@@ -3,12 +3,25 @@ package tui
import (
"errors"
"strings"
+ "time"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
+ "go.etcd.io/bbolt"
)
+func OpenDB(path string, writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(path, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
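+		// Fail fast instead of blocking indefinitely if another process holds the file lock.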
+ Timeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
func CIDParser(s string) (any, error) {
data, err := base58.Decode(s)
if err != nil {
diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go
index 6b7532b08..b7e4d7c96 100644
--- a/cmd/frostfs-lens/internal/writecache/tui.go
+++ b/cmd/frostfs-lens/internal/writecache/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go
index ec737f8a0..2d52e0c56 100644
--- a/cmd/frostfs-node/accounting.go
+++ b/cmd/frostfs-node/accounting.go
@@ -3,19 +3,18 @@ package main
import (
"context"
"net"
+ "strings"
- accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc"
accountingService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
accounting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting/morph"
+ accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc"
"google.golang.org/grpc"
)
func initAccountingService(ctx context.Context, c *cfg) {
- if c.cfgMorph.client == nil {
- initMorphComponents(ctx, c)
- }
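+	// Initialization is idempotent: cfgMorph now tracks guard/initialized state, so the explicit nil check is gone.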
+ c.initMorphComponents(ctx)
balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, c.cfgAccounting.scriptHash, 0)
fatalOnErr(err)
@@ -32,5 +31,27 @@ func initAccountingService(ctx context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
accountingGRPC.RegisterAccountingServiceServer(s, server)
+
+ // TODO(@aarifullin): #1487 remove the dual service support.
+ s.RegisterService(frostFSServiceDesc(accountingGRPC.AccountingService_ServiceDesc), server)
})
}
+
+// frostFSServiceDesc creates a service descriptor with the new namespace for dual service support.
+func frostFSServiceDesc(sd grpc.ServiceDesc) *grpc.ServiceDesc {
+ sdLegacy := new(grpc.ServiceDesc)
+ *sdLegacy = sd
+
+ const (
+ legacyNamespace = "neo.fs.v2"
+ apemanagerLegacyNamespace = "frostfs.v2"
+ newNamespace = "frost.fs"
+ )
+
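+	// e.g. "neo.fs.v2.accounting.AccountingService" becomes "frost.fs.accounting.AccountingService".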
+ if strings.HasPrefix(sd.ServiceName, legacyNamespace) {
+ sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, legacyNamespace, newNamespace)
+ } else if strings.HasPrefix(sd.ServiceName, apemanagerLegacyNamespace) {
+ sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, apemanagerLegacyNamespace, newNamespace)
+ }
+ return sdLegacy
+}
diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go
index 79c45c254..513314712 100644
--- a/cmd/frostfs-node/apemanager.go
+++ b/cmd/frostfs-node/apemanager.go
@@ -3,22 +3,23 @@ package main
import (
"net"
- apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc"
ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage"
morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
apemanager_transport "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/apemanager/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager"
+ apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
"google.golang.org/grpc"
)
func initAPEManagerService(c *cfg) {
contractStorage := ape_contract.NewProxyVerificationContractStorage(
morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
- c.shared.key,
+ c.key,
c.cfgMorph.proxyScriptHash,
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
+ c.cfgMorph.client,
apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)
@@ -26,5 +27,8 @@ func initAPEManagerService(c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
apemanager_grpc.RegisterAPEManagerServiceServer(s, server)
+
+ // TODO(@aarifullin): #1487 remove the dual service support.
+ s.RegisterService(frostFSServiceDesc(apemanager_grpc.APEManagerService_ServiceDesc), server)
})
}
diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go
index 64c3beba7..ce8ae9662 100644
--- a/cmd/frostfs-node/attributes.go
+++ b/cmd/frostfs-node/attributes.go
@@ -6,9 +6,5 @@ import (
)
func parseAttributes(c *cfg) {
- if nodeconfig.Relay(c.appCfg) {
- return
- }
-
fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg)))
}
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index 57f65d873..e5df0a22d 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -1,22 +1,30 @@
package main
import (
+ "bytes"
+ "cmp"
+ "context"
+ "slices"
"sync"
+ "sync/atomic"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/v2/expirable"
+ "github.com/hashicorp/golang-lru/v2/simplelru"
+ "go.uber.org/zap"
)
-type netValueReader[K any, V any] func(K) (V, error)
+type netValueReader[K any, V any] func(ctx context.Context, key K) (V, error)
type valueWithError[V any] struct {
v V
@@ -49,7 +57,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// updates the value from the network on cache miss or by TTL.
//
// returned value should not be modified.
-func (c *ttlNetCache[K, V]) get(key K) (V, error) {
+func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
hit := false
startedAt := time.Now()
defer func() {
@@ -71,7 +79,7 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) {
return val.v, val.e
}
- v, err := c.netRdr(key)
+ v, err := c.netRdr(ctx, key)
c.cache.Add(key, &valueWithError[V]{
v: v,
@@ -109,55 +117,6 @@ func (c *ttlNetCache[K, V]) remove(key K) {
hit = c.cache.Remove(key)
}
-// entity that provides LRU cache interface.
-type lruNetCache struct {
- cache *lru.Cache[uint64, *netmapSDK.NetMap]
-
- netRdr netValueReader[uint64, *netmapSDK.NetMap]
-
- metrics cacheMetrics
-}
-
-// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
-func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache {
- cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
- fatalOnErr(err)
-
- return &lruNetCache{
- cache: cache,
- netRdr: netRdr,
- metrics: metrics,
- }
-}
-
-// reads value by the key.
-//
-// updates the value from the network on cache miss.
-//
-// returned value should not be modified.
-func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
- }()
-
- val, ok := c.cache.Get(key)
- if ok {
- hit = true
- return val, nil
- }
-
- val, err := c.netRdr(key)
- if err != nil {
- return nil, err
- }
-
- c.cache.Add(key, val)
-
- return val, nil
-}
-
// wrapper over TTL cache of values read from the network
// that implements container storage.
type ttlContainerStorage struct {
@@ -165,14 +124,12 @@ type ttlContainerStorage struct {
delInfoCache *ttlNetCache[cid.ID, *container.DelInfo]
}
-func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
- const containerCacheSize = 100
-
- lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
- return v.Get(id)
+func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
+ lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return v.Get(ctx, id)
}, metrics.NewCacheMetrics("container"))
- lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
- return v.DeletionInfo(id)
+ lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return v.DeletionInfo(ctx, id)
}, metrics.NewCacheMetrics("container_deletion_info"))
return ttlContainerStorage{
@@ -190,68 +147,245 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache.
-func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
- return s.containerCache.get(cnr)
+func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) {
+ return s.containerCache.get(ctx, cnr)
}
-func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
- return s.delInfoCache.get(cnr)
-}
-
-type ttlEACLStorage struct {
- *ttlNetCache[cid.ID, *container.EACL]
-}
-
-func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) ttlEACLStorage {
- const eaclCacheSize = 100
-
- lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(id cid.ID) (*container.EACL, error) {
- return v.GetEACL(id)
- }, metrics.NewCacheMetrics("eacl"))
-
- return ttlEACLStorage{lruCnrCache}
-}
-
-// GetEACL returns eACL value from the cache. If value is missing in the cache
-// or expired, then it returns value from side chain and updates cache.
-func (s ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) {
- return s.get(cnr)
-}
-
-// InvalidateEACL removes cached eACL value.
-func (s ttlEACLStorage) InvalidateEACL(cnr cid.ID) {
- s.remove(cnr)
+func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) {
+ return s.delInfoCache.get(ctx, cnr)
}
type lruNetmapSource struct {
netState netmap.State
- cache *lruNetCache
+ client rawSource
+ cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
+ mtx sync.RWMutex
+ metrics cacheMetrics
+ log *logger.Logger
+ candidates atomic.Pointer[[]netmapSDK.NodeInfo]
}
-func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
+type rawSource interface {
+ GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
+ GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
+}
+
+func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
+ netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
+) netmap.Source {
const netmapCacheSize = 10
- lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
- return v.GetNetMapByEpoch(key)
- }, metrics.NewCacheMetrics("netmap"))
+ cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
+ fatalOnErr(err)
- return &lruNetmapSource{
- netState: s,
- cache: lruNetmapCache,
+ src := &lruNetmapSource{
+ netState: netState,
+ client: client,
+ cache: cache,
+ log: log,
+ metrics: metrics.NewCacheMetrics("netmap"),
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ src.updateCandidates(ctx, d)
+ }()
+
+ return src
+}
+
+// updateCandidates periodically fetches the candidate list and merges it into the cached netmaps.
+func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-timer.C:
+ newCandidates, err := s.client.GetCandidates(ctx)
+ if err != nil {
+ s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
+ timer.Reset(d)
+ break
+ }
+ if len(newCandidates) == 0 {
+ s.candidates.Store(&newCandidates)
+ timer.Reset(d)
+ break
+ }
+ slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
+ return cmp.Compare(n1.Hash(), n2.Hash())
+ })
+
+			// Determine whether the candidate set changed since the last poll.
+ v := s.candidates.Load()
+ if v == nil {
+ s.candidates.Store(&newCandidates)
+ s.mergeCacheWithCandidates(newCandidates)
+ timer.Reset(d)
+ break
+ }
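+			// Two candidate sets are equal only if every node matches by public key, status, external addresses and network endpoints.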
+ ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
+ if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
+ uint32(n1.Status()) != uint32(n2.Status()) ||
+ slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
+ return 1
+ }
+ ne1 := slices.Collect(n1.NetworkEndpoints())
+ ne2 := slices.Collect(n2.NetworkEndpoints())
+ return slices.Compare(ne1, ne2)
+ })
+ if ret != 0 {
+ s.candidates.Store(&newCandidates)
+ s.mergeCacheWithCandidates(newCandidates)
+ }
+ timer.Reset(d)
+ }
}
}
-func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
- return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
+func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
+ s.mtx.Lock()
+ tmp := s.cache.Values()
+ s.mtx.Unlock()
+ for _, pointer := range tmp {
+ nm := pointer.Load()
+ updates := getNetMapNodesToUpdate(nm, candidates)
+ if len(updates) > 0 {
+ nm = nm.Clone()
+ mergeNetmapWithCandidates(updates, nm)
+ pointer.Store(nm)
+ }
+ }
}
-func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
- return s.getNetMapByEpoch(epoch)
+// reads value by the key.
+//
+// updates the value from the network on cache miss.
+//
+// returned value should not be modified.
+func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
+ hit := false
+ startedAt := time.Now()
+ defer func() {
+ s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
+ }()
+
+ s.mtx.RLock()
+ val, ok := s.cache.Get(key)
+ s.mtx.RUnlock()
+ if ok {
+ hit = true
+ return val.Load(), nil
+ }
+
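+	// Slow path: re-check under the write lock before hitting the network.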
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ val, ok = s.cache.Get(key)
+ if ok {
+ hit = true
+ return val.Load(), nil
+ }
+
+ nm, err := s.client.GetNetMapByEpoch(ctx, key)
+ if err != nil {
+ return nil, err
+ }
+ v := s.candidates.Load()
+ if v != nil {
+ updates := getNetMapNodesToUpdate(nm, *v)
+ if len(updates) > 0 {
+ mergeNetmapWithCandidates(updates, nm)
+ }
+ }
+
+ p := atomic.Pointer[netmapSDK.NetMap]{}
+ p.Store(nm)
+ s.cache.Add(key, &p)
+
+ return nm, nil
}
-func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
- val, err := s.cache.get(epoch)
+// mergeNetmapWithCandidates applies the updates collected from the candidate list to the nodes of the provided netmap.
+func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
+ for _, v := range updates {
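+		// Zero values mean "no change": UnspecifiedState and nil slices are skipped.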
+ if v.status != netmapSDK.UnspecifiedState {
+ nm.Nodes()[v.netmapIndex].SetStatus(v.status)
+ }
+ if v.externalAddresses != nil {
+ nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
+ }
+ if v.endpoints != nil {
+ nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
+ }
+ }
+}
+
+type nodeToUpdate struct {
+ netmapIndex int
+ status netmapSDK.NodeState
+ externalAddresses []string
+ endpoints []string
+}
+
+// getNetMapNodesToUpdate returns the changes required to bring the provided netmap in line with the candidate list.
+func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
+ var res []nodeToUpdate
+ for i := range nm.Nodes() {
+ for _, cnd := range candidates {
+ if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
+ var tmp nodeToUpdate
+ var update bool
+
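+				// Only Online and Maintenance statuses are taken from candidates.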
+ if cnd.Status() != nm.Nodes()[i].Status() &&
+ (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
+ update = true
+ tmp.status = cnd.Status()
+ }
+
+ externalAddresses := cnd.ExternalAddresses()
+ if externalAddresses != nil &&
+ slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
+ update = true
+ tmp.externalAddresses = externalAddresses
+ }
+
+ nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
+ nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints())
+ candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
+ candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints())
+ if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
+ update = true
+ tmp.endpoints = candidateEndpoints
+ }
+
+ if update {
+ tmp.netmapIndex = i
+ res = append(res, tmp)
+ }
+
+ break
+ }
+ }
+ }
+ return res
+}
+
+func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
+ return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff)
+}
+
+func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
+ return s.getNetMapByEpoch(ctx, epoch)
+}
+
+func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
+ val, err := s.get(ctx, epoch)
if err != nil {
return nil, err
}
@@ -259,7 +393,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, err
return val, nil
}
-func (s *lruNetmapSource) Epoch() (uint64, error) {
+func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) {
return s.netState.CurrentEpoch(), nil
}
@@ -267,7 +401,10 @@ type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte]
}
-func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
+func newCachedIRFetcher(f interface {
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
+},
+) cachedIRFetcher {
const (
irFetcherCacheSize = 1 // we intend to store only one value
@@ -281,8 +418,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached
)
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
- func(_ struct{}) ([][]byte, error) {
- return f.InnerRingKeys()
+ func(ctx context.Context, _ struct{}) ([][]byte, error) {
+ return f.InnerRingKeys(ctx)
}, metrics.NewCacheMetrics("ir_keys"),
)
@@ -292,8 +429,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates
// the cache.
-func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
- val, err := f.get(struct{}{})
+func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) {
+ val, err := f.get(ctx, struct{}{})
if err != nil {
return nil, err
}
@@ -316,7 +453,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
}
}
-func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
+func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
const ttl = time.Second * 30
hit := false
@@ -338,7 +475,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
c.mtx.Lock()
size = c.lastSize
if !c.lastUpdated.After(prevUpdated) {
- size = c.src.MaxObjectSize()
+ size = c.src.MaxObjectSize(ctx)
c.lastSize = size
c.lastUpdated = time.Now()
}
diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go
index f8c324a2f..24286826f 100644
--- a/cmd/frostfs-node/cache_test.go
+++ b/cmd/frostfs-node/cache_test.go
@@ -1,10 +1,13 @@
package main
import (
+ "context"
"errors"
+ "sync"
"testing"
"time"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)
@@ -17,7 +20,7 @@ func TestTTLNetCache(t *testing.T) {
t.Run("Test Add and Get", func(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
- val, err := cache.get(key)
+ val, err := cache.get(context.Background(), key)
require.NoError(t, err)
require.Equal(t, ti, val)
})
@@ -26,7 +29,7 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
time.Sleep(2 * ttlDuration)
- val, err := cache.get(key)
+ val, err := cache.get(context.Background(), key)
require.NoError(t, err)
require.NotEqual(t, val, ti)
})
@@ -35,20 +38,20 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
cache.remove(key)
- val, err := cache.get(key)
+ val, err := cache.get(context.Background(), key)
require.NoError(t, err)
require.NotEqual(t, val, ti)
})
t.Run("Test Cache Error", func(t *testing.T) {
cache.set("error", time.Now(), errors.New("mock error"))
- _, err := cache.get("error")
+ _, err := cache.get(context.Background(), "error")
require.Error(t, err)
require.Equal(t, "mock error", err.Error())
})
}
-func testNetValueReader(key string) (time.Time, error) {
+func testNetValueReader(_ context.Context, key string) (time.Time, error) {
if key == "error" {
return time.Now(), errors.New("mock error")
}
@@ -58,3 +61,75 @@ func testNetValueReader(key string) (time.Time, error) {
type noopCacheMetricts struct{}
func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
+
+type rawSrc struct{}
+
+func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
+ node0 := netmapSDK.NodeInfo{}
+ node0.SetPublicKey([]byte{byte(1)})
+ node0.SetStatus(netmapSDK.Online)
+ node0.SetExternalAddresses("1", "0")
+ node0.SetNetworkEndpoints("1", "0")
+
+ node1 := netmapSDK.NodeInfo{}
+ node1.SetPublicKey([]byte{byte(1)})
+ node1.SetStatus(netmapSDK.Online)
+ node1.SetExternalAddresses("1", "0")
+ node1.SetNetworkEndpoints("1", "0")
+
+ return []netmapSDK.NodeInfo{node0, node1}, nil
+}
+
+func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
+ nm := netmapSDK.NetMap{}
+ nm.SetEpoch(1)
+
+ node0 := netmapSDK.NodeInfo{}
+ node0.SetPublicKey([]byte{byte(1)})
+ node0.SetStatus(netmapSDK.Maintenance)
+ node0.SetExternalAddresses("0")
+ node0.SetNetworkEndpoints("0")
+
+ node1 := netmapSDK.NodeInfo{}
+ node1.SetPublicKey([]byte{byte(1)})
+ node1.SetStatus(netmapSDK.Maintenance)
+ node1.SetExternalAddresses("0")
+ node1.SetNetworkEndpoints("0")
+
+ nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
+
+ return &nm, nil
+}
+
+type st struct{}
+
+func (s *st) CurrentEpoch() uint64 {
+ return 1
+}
+
+func TestNetmapStorage(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ wg := sync.WaitGroup{}
+ cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
+
+ nm, err := cache.GetNetMapByEpoch(ctx, 1)
+ require.NoError(t, err)
+ require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
+ require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
+ require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
+
+ require.Eventually(t, func() bool {
+ nm, err := cache.GetNetMapByEpoch(ctx, 1)
+ require.NoError(t, err)
+ for _, node := range nm.Nodes() {
+ if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
+ node.NumberOfNetworkEndpoints() == 2) {
+ return false
+ }
+ }
+ return true
+ }, time.Second*5, time.Millisecond*10)
+
+ cancel()
+ wg.Wait()
+}
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index ed3a65c25..96274e625 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -15,7 +15,6 @@ import (
"syscall"
"time"
- netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/audit"
@@ -26,18 +25,23 @@ import (
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
+ treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -56,6 +60,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone"
tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
@@ -67,6 +72,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
+ netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -103,11 +110,17 @@ type applicationConfiguration struct {
level string
destination string
timestamp bool
+ options []zap.Option
+ tags [][]string
+ }
+
+ ObjectCfg struct {
+ tombstoneLifetime uint64
+ priorityMetrics []placement.Metric
}
EngineCfg struct {
errorThreshold uint32
- shardPoolSize uint32
shards []shardCfg
lowMem bool
}
@@ -117,15 +130,13 @@ type applicationConfiguration struct {
}
type shardCfg struct {
- compress bool
- estimateCompressibility bool
- estimateCompressibilityThreshold float64
+ compression compression.Config
smallSizeObjectLimit uint64
- uncompressableContentType []string
refillMetabase bool
refillMetabaseWorkersCount int
mode shardmode.Mode
+ limiter qos.Limiter
metaCfg struct {
path string
@@ -222,49 +233,71 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
+ var opts []zap.Option
+ if loggerconfig.ToLokiConfig(c).Enabled {
+ opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+ lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
+ return lokiCore
+ })}
+ }
+ a.LoggerCfg.options = opts
+ a.LoggerCfg.tags = loggerconfig.Tags(c)
+
+ // Object
+
+ a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
+ locodeDBPath := nodeconfig.LocodeDBPath(c)
+ parser, err := placement.NewMetricsParser(locodeDBPath)
+ if err != nil {
+ return fmt.Errorf("metrics parser creation: %w", err)
+ }
+ m, err := parser.ParseMetrics(objectconfig.Get(c).Priority())
+ if err != nil {
+ return fmt.Errorf("parse metrics: %w", err)
+ }
+ a.ObjectCfg.priorityMetrics = m
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
- a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
}
-func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
- var newConfig shardCfg
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
+ var target shardCfg
- newConfig.refillMetabase = oldConfig.RefillMetabase()
- newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
- newConfig.mode = oldConfig.Mode()
- newConfig.compress = oldConfig.Compress()
- newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
- newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
- newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
- newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
+ target.refillMetabase = source.RefillMetabase()
+ target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
+ target.mode = source.Mode()
+ target.compression = source.Compression()
+ target.smallSizeObjectLimit = source.SmallSizeLimit()
- a.setShardWriteCacheConfig(&newConfig, oldConfig)
+ a.setShardWriteCacheConfig(&target, source)
- a.setShardPiloramaConfig(c, &newConfig, oldConfig)
+ a.setShardPiloramaConfig(c, &target, source)
- if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
+ if err := a.setShardStorageConfig(&target, source); err != nil {
return err
}
- a.setMetabaseConfig(&newConfig, oldConfig)
+ a.setMetabaseConfig(&target, source)
- a.setGCConfig(&newConfig, oldConfig)
+ a.setGCConfig(&target, source)
+ if err := a.setLimiter(&target, source); err != nil {
+ return err
+ }
- a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
+ a.EngineCfg.shards = append(a.EngineCfg.shards, target)
return nil
}
-func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
- writeCacheCfg := oldConfig.WriteCache()
+func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
+ writeCacheCfg := source.WriteCache()
if writeCacheCfg.Enabled() {
- wc := &newConfig.writecacheCfg
+ wc := &target.writecacheCfg
wc.enabled = true
wc.path = writeCacheCfg.Path()
@@ -277,10 +310,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
}
}
-func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
if config.BoolSafe(c.Sub("tree"), "enabled") {
- piloramaCfg := oldConfig.Pilorama()
- pr := &newConfig.piloramaCfg
+ piloramaCfg := source.Pilorama()
+ pr := &target.piloramaCfg
pr.enabled = true
pr.path = piloramaCfg.Path()
@@ -291,8 +324,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newC
}
}
-func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
- blobStorCfg := oldConfig.BlobStor()
+func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
+ blobStorCfg := source.BlobStor()
storagesCfg := blobStorCfg.Storages()
ss := make([]subStorageCfg, 0, len(storagesCfg))
@@ -326,13 +359,13 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
ss = append(ss, sCfg)
}
- newConfig.subStorages = ss
+ target.subStorages = ss
return nil
}
-func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
- metabaseCfg := oldConfig.Metabase()
- m := &newConfig.metaCfg
+func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
+ metabaseCfg := source.Metabase()
+ m := &target.metaCfg
m.path = metabaseCfg.Path()
m.perm = metabaseCfg.BoltDB().Perm()
@@ -340,12 +373,22 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
}
-func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
- gcCfg := oldConfig.GC()
- newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
- newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
- newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
- newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
+func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
+ gcCfg := source.GC()
+ target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+ target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+ target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+ target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
+}
+
+func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
+ limitsConfig := source.Limits().ToConfig()
+ limiter, err := qos.NewLimiter(limitsConfig)
+ if err != nil {
+ return err
+ }
+ target.limiter = limiter
+ return nil
}
// internals contains application-specific internals that are created
@@ -376,16 +419,16 @@ type internals struct {
}
// starts node's maintenance.
-func (c *cfg) startMaintenance() {
+func (c *cfg) startMaintenance(ctx context.Context) {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
- c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
+ c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
-func (c *internals) stopMaintenance() {
+func (c *internals) stopMaintenance(ctx context.Context) {
if c.isMaintenance.CompareAndSwap(true, false) {
- c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
+ c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
}
@@ -428,12 +471,13 @@ type shared struct {
metricsCollector *metrics.NodeMetrics
metricsSvc *objectService.MetricCollector
+
+ dialerSource *internalNet.DialerSource
}
// dynamicConfiguration stores parameters of the
// components that supports runtime reconfigurations.
type dynamicConfiguration struct {
- logger *logger.Prm
pprof *httpComponent
metrics *httpComponent
}
@@ -470,6 +514,7 @@ type cfg struct {
cfgNetmap cfgNetmap
cfgControlService cfgControlService
cfgObject cfgObject
+ cfgQoSService cfgQoSService
}
// ReadCurrentNetMap reads network map which has been cached at the
@@ -504,6 +549,8 @@ type cfgGRPC struct {
maxChunkSize uint64
maxAddrAmount uint64
reconnectTimeout time.Duration
+
+ limiter atomic.Pointer[limiting.SemaphoreLimiter]
}
func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@@ -563,13 +610,16 @@ func (c *cfgGRPC) dropConnection(endpoint string) {
}
type cfgMorph struct {
- client *client.Client
+ initialized bool
+ guard sync.Mutex
- notaryEnabled bool
+ client *client.Client
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
+ containerCacheSize uint32
+
proxyScriptHash neogoutil.Uint160
}
@@ -580,9 +630,10 @@ type cfgAccounting struct {
type cfgContainer struct {
scriptHash neogoutil.Uint160
- parsers map[event.Type]event.NotificationParser
- subscribers map[event.Type][]event.Handler
- workerPool util.WorkerPool // pool for asynchronous handlers
+ parsers map[event.Type]event.NotificationParser
+ subscribers map[event.Type][]event.Handler
+ workerPool util.WorkerPool // pool for asynchronous handlers
+ containerBatchSize uint32
}
type cfgFrostfsID struct {
@@ -600,9 +651,7 @@ type cfgNetmap struct {
state *networkState
- needBootstrap bool
reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime
- startEpoch uint64 // epoch number when application is started
}
type cfgNodeInfo struct {
@@ -615,15 +664,13 @@ type cfgObject struct {
cnrSource container.Source
- eaclSource container.EACLSource
-
cfgAccessPolicyEngine cfgAccessPolicyEngine
pool cfgObjectRoutines
cfgLocalStorage cfgLocalStorage
- tombstoneLifetime uint64
+ tombstoneLifetime *atomic.Uint64
skipSessionTokenIssuerVerification bool
}
@@ -639,10 +686,6 @@ type cfgAccessPolicyEngine struct {
}
type cfgObjectRoutines struct {
- putRemote *ants.Pool
-
- putLocal *ants.Pool
-
replication *ants.Pool
}
@@ -666,11 +709,9 @@ func initCfg(appCfg *config.Config) *cfg {
key := nodeconfig.Key(appCfg)
- relayOnly := nodeconfig.Relay(appCfg)
-
netState := newNetworkState()
- c.shared = initShared(appCfg, key, netState, relayOnly)
+ c.shared = initShared(appCfg, key, netState)
netState.metrics = c.metricsCollector
@@ -679,12 +720,7 @@ func initCfg(appCfg *config.Config) *cfg {
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
- if loggerconfig.ToLokiConfig(appCfg).Enabled {
- log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
- lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
- return lokiCore
- }))
- }
+ logger.UpdateLevelForTags(logPrm)
c.internals = initInternals(appCfg, log)
@@ -695,7 +731,7 @@ func initCfg(appCfg *config.Config) *cfg {
c.cfgFrostfsID = initFrostfsID(appCfg)
- c.cfgNetmap = initNetmap(appCfg, netState, relayOnly)
+ c.cfgNetmap = initNetmap(appCfg, netState)
c.cfgGRPC = initCfgGRPC()
@@ -741,22 +777,24 @@ func initSdNotify(appCfg *config.Config) bool {
return false
}
-func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared {
- var netAddr network.AddressGroup
-
- if !relayOnly {
- netAddr = nodeconfig.BootstrapAddresses(appCfg)
- }
+func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared {
+ netAddr := nodeconfig.BootstrapAddresses(appCfg)
persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
fatalOnErr(err)
+ nodeMetrics := metrics.NewNodeMetrics()
+
+ ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg, nodeMetrics.MultinetMetrics()))
+ fatalOnErr(err)
+
cacheOpts := cache.ClientCacheOpts{
DialTimeout: apiclientconfig.DialTimeout(appCfg),
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
Key: &key.PrivateKey,
AllowExternal: apiclientconfig.AllowExternal(appCfg),
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
+ DialerSource: ds,
}
return shared{
@@ -768,22 +806,38 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
bgClientCache: cache.NewSDKClientCache(cacheOpts),
putClientCache: cache.NewSDKClientCache(cacheOpts),
persistate: persistate,
- metricsCollector: metrics.NewNodeMetrics(),
+ metricsCollector: nodeMetrics,
+ dialerSource: ds,
}
}
-func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
+func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) internalNet.Config {
+ result := internalNet.Config{
+ Enabled: multinet.Enabled(appCfg),
+ Balancer: multinet.Balancer(appCfg),
+ Restrict: multinet.Restrict(appCfg),
+ FallbackDelay: multinet.FallbackDelay(appCfg),
+ Metrics: m,
+ }
+ sn := multinet.Subnets(appCfg)
+ for _, s := range sn {
+ result.Subnets = append(result.Subnets, internalNet.Subnet{
+ Prefix: s.Mask,
+ SourceIPs: s.SourceIPs,
+ })
+ }
+ return result
+}
+
+func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap {
netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
fatalOnErr(err)
- var reBootstrapTurnedOff atomic.Bool
- reBootstrapTurnedOff.Store(relayOnly)
return cfgNetmap{
scriptHash: contractsconfig.Netmap(appCfg),
state: netState,
workerPool: netmapWorkerPool,
- needBootstrap: !relayOnly,
- reBoostrapTurnedOff: &reBootstrapTurnedOff,
+ reBoostrapTurnedOff: &atomic.Bool{},
}
}
@@ -803,20 +857,22 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
}
}
-func initCfgGRPC() cfgGRPC {
- maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
- maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
+func initCfgGRPC() (cfg cfgGRPC) {
+ maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
+ maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
- return cfgGRPC{
- maxChunkSize: maxChunkSize,
- maxAddrAmount: maxAddrAmount,
- }
+ cfg.maxChunkSize = maxChunkSize
+ cfg.maxAddrAmount = maxAddrAmount
+
+ return
}
func initCfgObject(appCfg *config.Config) cfgObject {
+ var tsLifetime atomic.Uint64
+ tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg))
return cfgObject{
pool: initObjectPool(appCfg),
- tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg),
+ tombstoneLifetime: &tsLifetime,
skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(),
}
}
@@ -825,9 +881,8 @@ func (c *cfg) engineOpts() []engine.Option {
var opts []engine.Option
opts = append(opts,
- engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
- engine.WithLogger(c.log),
+ engine.WithLogger(c.log.WithTag(logger.TagEngine)),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
)
@@ -864,7 +919,8 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxCacheSize(wcRead.sizeLimit),
writecache.WithMaxCacheCount(wcRead.countLimit),
writecache.WithNoSync(wcRead.noSync),
- writecache.WithLogger(c.log),
+ writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)),
+ writecache.WithQoSLimiter(shCfg.limiter),
)
}
return writeCacheOpts
@@ -903,7 +959,8 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval),
blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
- blobovniczatree.WithLogger(c.log),
+ blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)),
+ blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)),
blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit),
}
@@ -926,7 +983,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
fstree.WithPerm(sRead.perm),
fstree.WithDepth(sRead.depth),
fstree.WithNoSync(sRead.noSync),
- fstree.WithLogger(c.log),
+ fstree.WithLogger(c.log.WithTag(logger.TagFSTree)),
}
if c.metricsCollector != nil {
fstreeOpts = append(fstreeOpts,
@@ -956,12 +1013,9 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
ss := c.getSubstorageOpts(ctx, shCfg)
blobstoreOpts := []blobstor.Option{
- blobstor.WithCompressObjects(shCfg.compress),
- blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
- blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility),
- blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold),
+ blobstor.WithCompression(shCfg.compression),
blobstor.WithStorages(ss),
- blobstor.WithLogger(c.log),
+ blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)),
}
if c.metricsCollector != nil {
blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore())))
@@ -980,12 +1034,13 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
}
if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
+ shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
}
var sh shardOptsWithID
sh.configID = shCfg.id()
sh.shOpts = []shard.Option{
- shard.WithLogger(c.log),
+ shard.WithLogger(c.log.WithTag(logger.TagShard)),
shard.WithRefillMetabase(shCfg.refillMetabase),
shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
shard.WithMode(shCfg.mode),
@@ -1004,30 +1059,33 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return pool
}),
+ shard.WithLimiter(shCfg.limiter),
}
return sh
}
-func (c *cfg) loggerPrm() (*logger.Prm, error) {
- // check if it has been inited before
- if c.dynamicConfiguration.logger == nil {
- c.dynamicConfiguration.logger = new(logger.Prm)
- }
-
+func (c *cfg) loggerPrm() (logger.Prm, error) {
+ var prm logger.Prm
// (re)init read configuration
- err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+ err := prm.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
- panic("incorrect log level format: " + c.LoggerCfg.level)
+ return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
}
- err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
+ err = prm.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
- panic("incorrect log destination format: " + c.LoggerCfg.destination)
+ return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
+ }
+ prm.PrependTimestamp = c.LoggerCfg.timestamp
+ prm.Options = c.LoggerCfg.options
+ err = prm.SetTags(c.LoggerCfg.tags)
+ if err != nil {
+ // not expected since validation should be performed before
+ return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination)
}
- c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
- return c.dynamicConfiguration.logger, nil
+ return prm, nil
}
func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1037,7 +1095,7 @@ func (c *cfg) LocalAddress() network.AddressGroup {
func initLocalStorage(ctx context.Context, c *cfg) {
ls := engine.New(c.engineOpts()...)
- addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
+ addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber())
})
@@ -1047,12 +1105,14 @@ func initLocalStorage(ctx context.Context, c *cfg) {
var shardsAttached int
for _, optsWithMeta := range c.shardOpts(ctx) {
- id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...)
+ id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts,
+ shard.WithTombstoneSource(c.createTombstoneSource()),
+ shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
if err != nil {
- c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
- c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
+ c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@@ -1062,27 +1122,26 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
- c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
+ c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
- c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
- zap.String("error", err.Error()),
+ c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
+ zap.Error(err),
)
} else {
- c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
+ c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
-func initAccessPolicyEngine(_ context.Context, c *cfg) {
+func initAccessPolicyEngine(ctx context.Context, c *cfg) {
var localOverrideDB chainbase.LocalOverrideDatabase
if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
- c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
+ c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
- chainbase.WithLogger(c.log),
chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()),
chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()),
chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()),
@@ -1095,7 +1154,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
- if cacheSize > 0 {
+ if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
}
@@ -1104,7 +1163,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
c.onShutdown(func() {
if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
- c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
+ c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure,
zap.Error(err),
)
}
@@ -1114,38 +1173,22 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
var err error
- optNonBlocking := ants.WithNonblocking(true)
-
- putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
- pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
- fatalOnErr(err)
-
- putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
- pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
- fatalOnErr(err)
-
replicatorPoolSize := replicatorconfig.PoolSize(cfg)
- if replicatorPoolSize <= 0 {
- replicatorPoolSize = putRemoteCapacity
- }
-
pool.replication, err = ants.NewPool(replicatorPoolSize)
fatalOnErr(err)
return pool
}
-func (c *cfg) LocalNodeInfo() (*netmapV2.NodeInfo, error) {
- var res netmapV2.NodeInfo
-
+func (c *cfg) LocalNodeInfo() *netmap.NodeInfo {
+ var res netmap.NodeInfo
ni, ok := c.cfgNetmap.state.getNodeInfo()
if ok {
- ni.WriteToV2(&res)
+ res = ni
} else {
- c.cfgNodeInfo.localInfo.WriteToV2(&res)
+ res = c.cfgNodeInfo.localInfo
}
-
- return &res, nil
+ return &res
}
// setContractNodeInfo rewrites local node info from the FrostFS network map.
@@ -1155,12 +1198,12 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
c.cfgNetmap.state.setNodeInfo(ni)
}
-func (c *cfg) updateContractNodeInfo(epoch uint64) {
- ni, err := c.netmapLocalNodeState(epoch)
+func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
+ ni, err := c.netmapLocalNodeState(ctx, epoch)
if err != nil {
- c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
+ c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
- zap.String("error", err.Error()))
+ zap.Error(err))
return
}
@@ -1170,42 +1213,37 @@ func (c *cfg) updateContractNodeInfo(epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
-// The state is set using the provided setter which MUST NOT be nil.
+// The state is set using the provided state value.
-func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
+func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
ni := c.cfgNodeInfo.localInfo
- stateSetter(&ni)
+ ni.SetStatus(state)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
- return c.cfgNetmap.wrapper.AddPeer(prm)
+ return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
}
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
-func bootstrapOnline(c *cfg) error {
- return c.bootstrapWithState((*netmap.NodeInfo).SetOnline)
+func bootstrapOnline(ctx context.Context, c *cfg) error {
+ return c.bootstrapWithState(ctx, netmap.Online)
}
// bootstrap calls bootstrapWithState with:
// - "maintenance" state if maintenance is in progress on the current node
// - "online", otherwise
-func (c *cfg) bootstrap() error {
+func (c *cfg) bootstrap(ctx context.Context) error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
- c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
- return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
+ c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
+ return c.bootstrapWithState(ctx, netmap.Maintenance)
}
- c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
+ c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
- return bootstrapOnline(c)
-}
-
-// needBootstrap checks if local node should be registered in network on bootup.
-func (c *cfg) needBootstrap() bool {
- return c.cfgNetmap.needBootstrap
+ return bootstrapOnline(ctx, c)
}
type dCmp struct {
@@ -1225,19 +1263,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
- c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
- c.shutdown()
+ c.shutdown(ctx)
- c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
- c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+ c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
- c.shutdown()
+ c.shutdown(ctx)
- c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+ c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
default:
// block until any signal is received
@@ -1245,19 +1283,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case <-sighupCh:
c.reloadConfig(ctx)
case <-ch:
- c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
- c.shutdown()
+ c.shutdown(ctx)
- c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
- c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+ c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
- c.shutdown()
+ c.shutdown(ctx)
- c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+ c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
}
}
@@ -1265,64 +1303,74 @@ func (c *cfg) signalWatcher(ctx context.Context) {
}
func (c *cfg) reloadConfig(ctx context.Context) {
- c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+ c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
- if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
- c.log.Info(logs.FrostFSNodeSIGHUPSkip)
+ if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
+ c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
return
}
- defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
+ defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
err := c.reloadAppConfig()
if err != nil {
- c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
// all the components are expected to support
// Logger's dynamic reconfiguration approach
- // Logger
+ components := c.getComponents(ctx)
- logPrm, err := c.loggerPrm()
- if err != nil {
- c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
- return
- }
-
- components := c.getComponents(ctx, logPrm)
+ // Object
+ c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
// Storage Engine
var rcfg engine.ReConfiguration
for _, optsWithID := range c.shardOpts(ctx) {
- rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
+ rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts,
+ shard.WithTombstoneSource(c.createTombstoneSource()),
+ shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)),
+ ))
}
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
if err != nil {
- c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
return
}
for _, component := range components {
err = component.reloadFunc()
if err != nil {
- c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+ c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
zap.String("component", component.name),
zap.Error(err))
}
}
- c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
+ c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+ return
+ }
+
+ c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
-func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
+func (c *cfg) getComponents(ctx context.Context) []dCmp {
var components []dCmp
- components = append(components, dCmp{"logger", logPrm.Reload})
+ components = append(components, dCmp{"logger", func() error {
+ prm, err := c.loggerPrm()
+ if err != nil {
+ return err
+ }
+ logger.UpdateLevelForTags(prm)
+ return nil
+ }})
components = append(components, dCmp{"runtime", func() error {
- setRuntimeParameters(c)
+ setRuntimeParameters(ctx, c)
return nil
}})
components = append(components, dCmp{"audit", func() error {
@@ -1337,10 +1385,16 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
}
updated, err := tracing.Setup(ctx, *traceConfig)
if updated {
- c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
+ c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
+ if c.treeService != nil {
+ components = append(components, dCmp{"tree", func() error {
+ c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys())
+ return nil
+ }})
+ }
if cmp, updated := metricsComponent(c); updated {
if cmp.enabled {
cmp.preReload = enableMetricsSvc
@@ -1353,17 +1407,13 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
}
+ components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
+
return components
}
func (c *cfg) reloadPools() error {
- newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
- c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
-
- newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
- c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
-
- newSize = replicatorconfig.PoolSize(c.appCfg)
+ newSize := replicatorconfig.PoolSize(c.appCfg)
c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
return nil
@@ -1372,7 +1422,7 @@ func (c *cfg) reloadPools() error {
func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
oldSize := p.Cap()
if oldSize != newSize {
- c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
+ c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
zap.Int("old", oldSize), zap.Int("new", newSize))
p.Tune(newSize)
}
@@ -1397,14 +1447,25 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
return tombstoneSource
}
-func (c *cfg) shutdown() {
- old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
+func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
+ return container.NewInfoProvider(func() (container.Source, error) {
+ c.initMorphComponents(ctx)
+ cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
+ if err != nil {
+ return nil, err
+ }
+ return containerClient.AsContainerSource(cc), nil
+ })
+}
+
+func (c *cfg) shutdown(ctx context.Context) {
+ old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
- c.log.Info(logs.FrostFSNodeShutdownSkip)
+ c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
return
}
if old == control.HealthStatus_STARTING {
- c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
+ c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
}
c.ctxCancel()
@@ -1414,6 +1475,6 @@ func (c *cfg) shutdown() {
}
if err := sdnotify.ClearStatus(); err != nil {
- c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go
index 36e53ea7c..c40bf3620 100644
--- a/cmd/frostfs-node/config/calls.go
+++ b/cmd/frostfs-node/config/calls.go
@@ -1,6 +1,7 @@
package config
import (
+ "slices"
"strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -52,6 +53,5 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used
// to provide default values.
func (x *Config) SetDefault(from *Config) {
- x.defaultPath = make([]string, len(from.path))
- copy(x.defaultPath, from.path)
+ x.defaultPath = slices.Clone(from.path)
}
diff --git a/cmd/frostfs-node/config/calls_test.go b/cmd/frostfs-node/config/calls_test.go
index 68bf1c679..bc149eb7d 100644
--- a/cmd/frostfs-node/config/calls_test.go
+++ b/cmd/frostfs-node/config/calls_test.go
@@ -1,7 +1,6 @@
package config_test
import (
- "os"
"strings"
"testing"
@@ -38,8 +37,7 @@ func TestConfigEnv(t *testing.T) {
envName := strings.ToUpper(
strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
- err := os.Setenv(envName, value)
- require.NoError(t, err)
+ t.Setenv(envName, value)
c := configtest.EmptyConfig()
diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go
index 35dae97d9..ee9d4268b 100644
--- a/cmd/frostfs-node/config/configdir_test.go
+++ b/cmd/frostfs-node/config/configdir_test.go
@@ -12,13 +12,10 @@ import (
func TestConfigDir(t *testing.T) {
dir := t.TempDir()
- cfgFileName0 := path.Join(dir, "cfg_00.json")
- cfgFileName1 := path.Join(dir, "cfg_01.yml")
+ cfgFileName := path.Join(dir, "cfg_01.yml")
- require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
- require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
+ require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
- require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
}
diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go
new file mode 100644
index 000000000..1cd64a6f8
--- /dev/null
+++ b/cmd/frostfs-node/config/container/container.go
@@ -0,0 +1,27 @@
+package containerconfig
+
+import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+
+const (
+ subsection = "container"
+ listStreamSubsection = "list_stream"
+
+ // ContainerBatchSizeDefault represents the maximum number of containers to send via stream at once.
+ ContainerBatchSizeDefault = 1000
+)
+
+// ContainerBatchSize returns the value of "batch_size" config parameter
+// from "list_stream" subsection of "container" section.
+//
+// Returns ContainerBatchSizeDefault if the value is missing or if
+// the value is not a positive integer.
+func ContainerBatchSize(c *config.Config) uint32 {
+ if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
+ return ContainerBatchSizeDefault
+ }
+ size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
+ if size == 0 {
+ return ContainerBatchSizeDefault
+ }
+ return size
+}
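+
+// Example (illustrative YAML shape consumed by this parser; the value 500
+// mirrors config/example/node and is not a built-in default):
+//
+//	container:
+//	  list_stream:
+//	    batch_size: 500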
diff --git a/cmd/frostfs-node/config/container/container_test.go b/cmd/frostfs-node/config/container/container_test.go
new file mode 100644
index 000000000..744cd3295
--- /dev/null
+++ b/cmd/frostfs-node/config/container/container_test.go
@@ -0,0 +1,27 @@
+package containerconfig_test
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestContainerSection(t *testing.T) {
+ t.Run("defaults", func(t *testing.T) {
+ empty := configtest.EmptyConfig()
+ require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
+ })
+
+ const path = "../../../../config/example/node"
+ fileConfigTest := func(c *config.Config) {
+ require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+}
diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go
index c944d1c58..7994e7809 100644
--- a/cmd/frostfs-node/config/engine/config.go
+++ b/cmd/frostfs-node/config/engine/config.go
@@ -11,10 +11,6 @@ import (
const (
subsection = "storage"
-
- // ShardPoolSizeDefault is a default value of routine pool size per-shard to
- // process object PUT operations in a storage engine.
- ShardPoolSizeDefault = 20
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@@ -41,6 +37,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
c.Sub(si),
)
+ if sc.Mode() == mode.Disabled {
+ continue
+ }
+
// Path for the blobstor can't be present in the default section, because different shards
// must have different paths, so if it is missing, the shard is not here.
// At the same time checking for "blobstor" section doesn't work properly
@@ -50,10 +50,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
}
(*config.Config)(sc).SetDefault(def)
- if sc.Mode() == mode.Disabled {
- continue
- }
-
if err := f(sc); err != nil {
return err
}
@@ -65,18 +61,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil
}
-// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
-//
-// Returns ShardPoolSizeDefault if the value is not a positive number.
-func ShardPoolSize(c *config.Config) uint32 {
- v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
- if v > 0 {
- return v
- }
-
- return ShardPoolSizeDefault
-}
-
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
//
// Returns 0 if the value is missing.
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 19ad0e7ac..401c54edc 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -14,10 +14,28 @@ import (
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
)
+func TestIterateShards(t *testing.T) {
+ fileConfigTest := func(c *config.Config) {
+ var res []string
+ require.NoError(t,
+ engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
+ res = append(res, sc.Metabase().Path())
+ return nil
+ }))
+ require.Equal(t, []string{"abc", "xyz"}, res)
+ }
+
+ const cfgDir = "./testdata/shards"
+ configtest.ForEachFileType(cfgDir, fileConfigTest)
+ configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
+}
+
func TestEngineSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
@@ -37,7 +55,6 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
- require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
})
@@ -47,7 +64,6 @@ func TestEngineSection(t *testing.T) {
num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
- require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@@ -60,6 +76,7 @@ func TestEngineSection(t *testing.T) {
ss := blob.Storages()
pl := sc.Pilorama()
gc := sc.GC()
+ limits := sc.Limits()
switch num {
case 0:
@@ -84,10 +101,11 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, 100, meta.BoltDB().MaxBatchSize())
require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay())
- require.Equal(t, true, sc.Compress())
- require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes())
- require.Equal(t, true, sc.EstimateCompressibility())
- require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold())
+ require.Equal(t, true, sc.Compression().Enabled)
+ require.Equal(t, compression.LevelFastest, sc.Compression().Level)
+ require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes)
+ require.Equal(t, true, sc.Compression().EstimateCompressibility)
+ require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold)
require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss))
@@ -118,6 +136,86 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
+
+ readLimits := limits.ToConfig().Read
+ writeLimits := limits.ToConfig().Write
+ require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
+ require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
+ require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
+ require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
+ require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
+ require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
+ require.ElementsMatch(t, readLimits.Tags,
+ []qos.IOTagConfig{
+ {
+ Tag: "internal",
+ Weight: toPtr(20),
+ ReservedOps: toPtr(1000),
+ LimitOps: toPtr(0),
+ },
+ {
+ Tag: "client",
+ Weight: toPtr(70),
+ ReservedOps: toPtr(10000),
+ },
+ {
+ Tag: "background",
+ Weight: toPtr(5),
+ LimitOps: toPtr(10000),
+ ReservedOps: toPtr(0),
+ },
+ {
+ Tag: "writecache",
+ Weight: toPtr(5),
+ LimitOps: toPtr(25000),
+ },
+ {
+ Tag: "policer",
+ Weight: toPtr(5),
+ LimitOps: toPtr(25000),
+ Prohibited: true,
+ },
+ {
+ Tag: "treesync",
+ Weight: toPtr(5),
+ LimitOps: toPtr(25),
+ },
+ })
+ require.ElementsMatch(t, writeLimits.Tags,
+ []qos.IOTagConfig{
+ {
+ Tag: "internal",
+ Weight: toPtr(200),
+ ReservedOps: toPtr(100),
+ LimitOps: toPtr(0),
+ },
+ {
+ Tag: "client",
+ Weight: toPtr(700),
+ ReservedOps: toPtr(1000),
+ },
+ {
+ Tag: "background",
+ Weight: toPtr(50),
+ LimitOps: toPtr(1000),
+ ReservedOps: toPtr(0),
+ },
+ {
+ Tag: "writecache",
+ Weight: toPtr(50),
+ LimitOps: toPtr(2500),
+ },
+ {
+ Tag: "policer",
+ Weight: toPtr(50),
+ LimitOps: toPtr(2500),
+ },
+ {
+ Tag: "treesync",
+ Weight: toPtr(50),
+ LimitOps: toPtr(100),
+ },
+ })
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -140,8 +238,9 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, 200, meta.BoltDB().MaxBatchSize())
require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay())
- require.Equal(t, false, sc.Compress())
- require.Equal(t, []string(nil), sc.UncompressableContentTypes())
+ require.Equal(t, false, sc.Compression().Enabled)
+ require.Equal(t, compression.LevelDefault, sc.Compression().Level)
+ require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes)
require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss))
@@ -172,6 +271,17 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
+
+ readLimits := limits.ToConfig().Read
+ writeLimits := limits.ToConfig().Write
+ require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout)
+ require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps)
+ require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps)
+ require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout)
+ require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps)
+ require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps)
+ require.Equal(t, 0, len(readLimits.Tags))
+ require.Equal(t, 0, len(writeLimits.Tags))
}
return nil
})
@@ -185,3 +295,7 @@ func TestEngineSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}
+
+func toPtr(v float64) *float64 {
+ return &v
+}
diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
index a51308b5b..b564d36f8 100644
--- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
+++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
@@ -37,10 +37,7 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
- if d < 0 {
- d = 0
- }
- return d
+ return max(d, 0)
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -48,10 +45,7 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
- if s < 0 {
- s = 0
- }
- return s
+ return max(s, 0)
}
// NoSync returns the value of "no_sync" config parameter.
@@ -66,8 +60,5 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) PageSize() int {
s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
- if s < 0 {
- s = 0
- }
- return s
+ return max(s, 0)
}
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index 0620c9f63..d42646da7 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -4,9 +4,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
+ limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
@@ -26,42 +28,27 @@ func From(c *config.Config) *Config {
return (*Config)(c)
}
-// Compress returns the value of "compress" config parameter.
-//
-// Returns false if the value is not a valid bool.
-func (x *Config) Compress() bool {
- return config.BoolSafe(
- (*config.Config)(x),
- "compress",
- )
-}
-
-// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter.
-//
-// Returns nil if a the value is missing or is invalid.
-func (x *Config) UncompressableContentTypes() []string {
- return config.StringSliceSafe(
- (*config.Config)(x),
- "compression_exclude_content_types")
-}
-
-// EstimateCompressibility returns the value of "estimate_compressibility" config parameter.
-//
-// Returns false if the value is not a valid bool.
-func (x *Config) EstimateCompressibility() bool {
- return config.BoolSafe(
- (*config.Config)(x),
- "compression_estimate_compressibility",
- )
+func (x *Config) Compression() compression.Config {
+ cc := (*config.Config)(x).Sub("compression")
+ if cc == nil {
+ return compression.Config{}
+ }
+ return compression.Config{
+ Enabled: config.BoolSafe(cc, "enabled"),
+ UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"),
+ Level: compression.Level(config.StringSafe(cc, "level")),
+ EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"),
+ EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc),
+ }
}
// EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter.
//
// Returns EstimateCompressibilityThresholdDefault if the value is not defined, not a valid float, or not in range [0.0; 1.0].
-func (x *Config) EstimateCompressibilityThreshold() float64 {
+func estimateCompressibilityThreshold(c *config.Config) float64 {
v := config.FloatOrDefault(
- (*config.Config)(x),
- "compression_estimate_compressibility_threshold",
+ c,
+ "estimate_compressibility_threshold",
EstimateCompressibilityThresholdDefault)
if v < 0.0 || v > 1.0 {
return EstimateCompressibilityThresholdDefault
@@ -125,6 +112,14 @@ func (x *Config) GC() *gcconfig.Config {
)
}
+// Limits returns "limits" subsection as a limitsconfig.Config.
+func (x *Config) Limits() *limitsconfig.Config {
+ return limitsconfig.From(
+ (*config.Config)(x).
+ Sub("limits"),
+ )
+}
+
// RefillMetabase returns the value of "resync_metabase" config parameter.
//
// Returns false if the value is not a valid bool.
diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go
new file mode 100644
index 000000000..ccd1e0000
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/shard/limits/config.go
@@ -0,0 +1,112 @@
+package limits
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "github.com/spf13/cast"
+)
+
+// From wraps config section into Config.
+func From(c *config.Config) *Config {
+ return (*Config)(c)
+}
+
+// Config is a wrapper over the config section
+// which provides access to Shard's limits configurations.
+type Config config.Config
+
+func (x *Config) ToConfig() qos.LimiterConfig {
+ result := qos.LimiterConfig{
+ Read: x.read(),
+ Write: x.write(),
+ }
+ panicOnErr(result.Validate())
+ return result
+}
+
+func (x *Config) read() qos.OpConfig {
+ return x.parse("read")
+}
+
+func (x *Config) write() qos.OpConfig {
+ return x.parse("write")
+}
+
+func (x *Config) parse(sub string) qos.OpConfig {
+ c := (*config.Config)(x).Sub(sub)
+ var result qos.OpConfig
+
+ if s := config.Int(c, "max_waiting_ops"); s > 0 {
+ result.MaxWaitingOps = s
+ } else {
+ result.MaxWaitingOps = qos.NoLimit
+ }
+
+ if s := config.Int(c, "max_running_ops"); s > 0 {
+ result.MaxRunningOps = s
+ } else {
+ result.MaxRunningOps = qos.NoLimit
+ }
+
+ if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
+ result.IdleTimeout = s
+ } else {
+ result.IdleTimeout = qos.DefaultIdleTimeout
+ }
+
+ result.Tags = tags(c)
+
+ return result
+}
+
+func tags(c *config.Config) []qos.IOTagConfig {
+ c = c.Sub("tags")
+ var result []qos.IOTagConfig
+ for i := 0; ; i++ {
+ tag := config.String(c, strconv.Itoa(i)+".tag")
+ if tag == "" {
+ return result
+ }
+
+ var tagConfig qos.IOTagConfig
+ tagConfig.Tag = tag
+
+ v := c.Value(strconv.Itoa(i) + ".weight")
+ if v != nil {
+ w, err := cast.ToFloat64E(v)
+ panicOnErr(err)
+ tagConfig.Weight = &w
+ }
+
+ v = c.Value(strconv.Itoa(i) + ".limit_ops")
+ if v != nil {
+ l, err := cast.ToFloat64E(v)
+ panicOnErr(err)
+ tagConfig.LimitOps = &l
+ }
+
+ v = c.Value(strconv.Itoa(i) + ".reserved_ops")
+ if v != nil {
+ r, err := cast.ToFloat64E(v)
+ panicOnErr(err)
+ tagConfig.ReservedOps = &r
+ }
+
+ v = c.Value(strconv.Itoa(i) + ".prohibited")
+ if v != nil {
+ r, err := cast.ToBoolE(v)
+ panicOnErr(err)
+ tagConfig.Prohibited = r
+ }
+
+ result = append(result, tagConfig)
+ }
+}
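+
+// Example (illustrative YAML shape consumed by parse and tags above; the
+// numbers mirror the engine config test fixture, not built-in defaults):
+//
+//	read:
+//	  max_running_ops: 10000
+//	  max_waiting_ops: 1000
+//	  idle_timeout: 30s
+//	  tags:
+//	    - tag: internal
+//	      weight: 20
+//	      reserved_ops: 1000
+//	      limit_ops: 0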
+
+func panicOnErr(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go
index 28671ca55..5d4e8f408 100644
--- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go
+++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go
@@ -52,10 +52,7 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
- if d <= 0 {
- d = 0
- }
- return d
+ return max(d, 0)
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -63,8 +60,5 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
- if s <= 0 {
- s = 0
- }
- return s
+ return max(s, 0)
}
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.env b/cmd/frostfs-node/config/engine/testdata/shards.env
new file mode 100644
index 000000000..079789b0f
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/testdata/shards.env
@@ -0,0 +1,3 @@
+FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
+FROSTFS_STORAGE_SHARD_1_MODE=disabled
+FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.json b/cmd/frostfs-node/config/engine/testdata/shards.json
new file mode 100644
index 000000000..b3d6abe85
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/testdata/shards.json
@@ -0,0 +1,13 @@
+{
+ "storage.shard": {
+ "0": {
+ "metabase.path": "abc"
+ },
+ "1": {
+ "mode": "disabled"
+ },
+ "2": {
+ "metabase.path": "xyz"
+ }
+ }
+}
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.yaml b/cmd/frostfs-node/config/engine/testdata/shards.yaml
new file mode 100644
index 000000000..bbbba3af8
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/testdata/shards.yaml
@@ -0,0 +1,7 @@
+storage.shard:
+ 0:
+ metabase.path: abc
+ 1:
+ mode: disabled
+ 2:
+ metabase.path: xyz
diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go
index ba9eeea2b..20f373184 100644
--- a/cmd/frostfs-node/config/logger/config.go
+++ b/cmd/frostfs-node/config/logger/config.go
@@ -2,6 +2,7 @@ package loggerconfig
import (
"os"
+ "strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
@@ -60,6 +61,21 @@ func Timestamp(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "timestamp")
}
+// Tags returns the value of "tags" config parameter from "logger" section.
+func Tags(c *config.Config) [][]string {
+ var res [][]string
+ sub := c.Sub(subsection).Sub("tags")
+ for i := 0; ; i++ {
+ s := sub.Sub(strconv.FormatInt(int64(i), 10))
+ names := config.StringSafe(s, "names")
+ if names == "" {
+ break
+ }
+ res = append(res, []string{names, config.StringSafe(s, "level")})
+ }
+ return res
+}
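+
+// Example (illustrative YAML consumed by Tags; the values mirror the test
+// fixture in config/example/node):
+//
+//	logger:
+//	  tags:
+//	    - names: "main, morph"
+//	      level: debug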
+
// ToLokiConfig extracts loki config.
func ToLokiConfig(c *config.Config) loki.Config {
hostname, _ := os.Hostname()
diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go
index ffe8ac693..796ad529e 100644
--- a/cmd/frostfs-node/config/logger/config_test.go
+++ b/cmd/frostfs-node/config/logger/config_test.go
@@ -22,6 +22,9 @@ func TestLoggerSection_Level(t *testing.T) {
require.Equal(t, "debug", loggerconfig.Level(c))
require.Equal(t, "journald", loggerconfig.Destination(c))
require.Equal(t, true, loggerconfig.Timestamp(c))
+ tags := loggerconfig.Tags(c)
+ require.Equal(t, "main, morph", tags[0][0])
+ require.Equal(t, "debug", tags[0][1])
}
configtest.ForEachFileType(path, fileConfigTest)
diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go
index 1c536a0e2..a9f774d18 100644
--- a/cmd/frostfs-node/config/morph/config.go
+++ b/cmd/frostfs-node/config/morph/config.go
@@ -30,6 +30,12 @@ const (
// FrostfsIDCacheSizeDefault is a default value of APE chain cache.
FrostfsIDCacheSizeDefault = 10_000
+
+ // ContainerCacheSizeDefault represents the default size for the container cache.
+ ContainerCacheSizeDefault = 100
+
+ // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
+ PollCandidatesTimeoutDefault = 20 * time.Second
)
var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@@ -103,6 +109,18 @@ func CacheTTL(c *config.Config) time.Duration {
return CacheTTLDefault
}
+// ContainerCacheSize returns the value of "container_cache_size" config parameter
+// from "morph" section.
+//
+// Returns 0 if the value is not a positive integer.
+// Returns ContainerCacheSizeDefault if the value is missing.
+func ContainerCacheSize(c *config.Config) uint32 {
+ if c.Sub(subsection).Value("container_cache_size") == nil {
+ return ContainerCacheSizeDefault
+ }
+ return config.Uint32Safe(c.Sub(subsection), "container_cache_size")
+}
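+
+// Example (illustrative YAML; 100 equals ContainerCacheSizeDefault):
+//
+//	morph:
+//	  container_cache_size: 100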
+
// SwitchInterval returns the value of "switch_interval" config parameter
// from "morph" section.
//
@@ -139,3 +157,17 @@ func FrostfsIDCacheSize(c *config.Config) uint32 {
}
return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size")
}
+
+// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter
+// from "morph" section.
+//
+// Returns PollCandidatesTimeoutDefault if the value is not a positive duration.
+func NetmapCandidatesPollInterval(c *config.Config) time.Duration {
+ v := config.DurationSafe(c.Sub(subsection).
+ Sub("netmap").Sub("candidates"), "poll_interval")
+ if v > 0 {
+ return v
+ }
+
+ return PollCandidatesTimeoutDefault
+}
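+
+// Example (illustrative YAML; 20s equals PollCandidatesTimeoutDefault):
+//
+//	morph:
+//	  netmap:
+//	    candidates:
+//	      poll_interval: 20s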
diff --git a/cmd/frostfs-node/config/multinet/config.go b/cmd/frostfs-node/config/multinet/config.go
new file mode 100644
index 000000000..f598efc51
--- /dev/null
+++ b/cmd/frostfs-node/config/multinet/config.go
@@ -0,0 +1,62 @@
+package multinet
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+)
+
+const (
+ subsection = "multinet"
+
+ FallbackDelayDefault = 300 * time.Millisecond
+)
+
+// Enabled returns the value of "enabled" config parameter from "multinet" section.
+func Enabled(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "enabled")
+}
+
+type Subnet struct {
+ Mask string
+ SourceIPs []string
+}
+
+// Subnets returns the value of "subnets" config parameter from "multinet" section.
+func Subnets(c *config.Config) []Subnet {
+ var result []Subnet
+ sub := c.Sub(subsection).Sub("subnets")
+ for i := 0; ; i++ {
+ s := sub.Sub(strconv.FormatInt(int64(i), 10))
+ mask := config.StringSafe(s, "mask")
+ if mask == "" {
+ break
+ }
+ sourceIPs := config.StringSliceSafe(s, "source_ips")
+ result = append(result, Subnet{
+ Mask: mask,
+ SourceIPs: sourceIPs,
+ })
+ }
+ return result
+}
+
+// Balancer returns the value of "balancer" config parameter from "multinet" section.
+func Balancer(c *config.Config) string {
+ return config.StringSafe(c.Sub(subsection), "balancer")
+}
+
+// Restrict returns the value of "restrict" config parameter from "multinet" section.
+func Restrict(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "restrict")
+}
+
+// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section.
+func FallbackDelay(c *config.Config) time.Duration {
+ fd := config.DurationSafe(c.Sub(subsection), "fallback_delay")
+ if fd != 0 { // negative value means no fallback
+ return fd
+ }
+ return FallbackDelayDefault
+}
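+
+// Example (illustrative YAML for this section; the values mirror the test
+// fixture in config/example/node, not built-in defaults):
+//
+//	multinet:
+//	  enabled: true
+//	  balancer: roundrobin
+//	  restrict: false
+//	  fallback_delay: 350ms
+//	  subnets:
+//	    - mask: 192.168.219.174/24
+//	      source_ips:
+//	        - 192.168.218.185
+//	        - 192.168.219.185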
diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go
new file mode 100644
index 000000000..5f7dc6d53
--- /dev/null
+++ b/cmd/frostfs-node/config/multinet/config_test.go
@@ -0,0 +1,52 @@
+package multinet
+
+import (
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMultinetSection(t *testing.T) {
+ t.Run("defaults", func(t *testing.T) {
+ empty := configtest.EmptyConfig()
+ require.Equal(t, false, Enabled(empty))
+ require.Equal(t, ([]Subnet)(nil), Subnets(empty))
+ require.Equal(t, "", Balancer(empty))
+ require.Equal(t, false, Restrict(empty))
+ require.Equal(t, FallbackDelayDefault, FallbackDelay(empty))
+ })
+
+ const path = "../../../../config/example/node"
+
+ fileConfigTest := func(c *config.Config) {
+ require.Equal(t, true, Enabled(c))
+ require.Equal(t, []Subnet{
+ {
+ Mask: "192.168.219.174/24",
+ SourceIPs: []string{
+ "192.168.218.185",
+ "192.168.219.185",
+ },
+ },
+ {
+ Mask: "10.78.70.74/24",
+ SourceIPs: []string{
+ "10.78.70.185",
+ "10.78.71.185",
+ },
+ },
+ }, Subnets(c))
+ require.Equal(t, "roundrobin", Balancer(c))
+ require.Equal(t, false, Restrict(c))
+ require.Equal(t, 350*time.Millisecond, FallbackDelay(c))
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+}
diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go
index 4d063245b..c50718c5f 100644
--- a/cmd/frostfs-node/config/node/config.go
+++ b/cmd/frostfs-node/config/node/config.go
@@ -3,7 +3,9 @@ package nodeconfig
import (
"fmt"
"io/fs"
+ "iter"
"os"
+ "slices"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
@@ -88,12 +90,8 @@ func Wallet(c *config.Config) *keys.PrivateKey {
type stringAddressGroup []string
-func (x stringAddressGroup) IterateAddresses(f func(string) bool) {
- for i := range x {
- if f(x[i]) {
- break
- }
- }
+func (x stringAddressGroup) Addresses() iter.Seq[string] {
+ return slices.Values(x)
}
func (x stringAddressGroup) NumberOfAddresses() int {
@@ -133,14 +131,6 @@ func Attributes(c *config.Config) (attrs []string) {
return
}
-// Relay returns the value of "relay" config parameter
-// from "node" section.
-//
-// Returns false if the value is not set.
-func Relay(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "relay")
-}
-
// PersistentSessions returns structure that provides access to "persistent_sessions"
// subsection of "node" section.
func PersistentSessions(c *config.Config) PersistentSessionsConfig {
@@ -198,7 +188,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
//
// Returns PermDefault if the value is not a positive number.
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
- p := config.UintSafe((*config.Config)(l.cfg), "perm")
+ p := config.UintSafe(l.cfg, "perm")
if p == 0 {
p = PermDefault
}
@@ -210,10 +200,15 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
//
// Returns false if the value is not a boolean.
func (l PersistentPolicyRulesConfig) NoSync() bool {
- return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
+ return config.BoolSafe(l.cfg, "no_sync")
}
// CompatibilityMode returns true if need to run node in compatibility with previous versions mode.
func CompatibilityMode(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode")
}
+
+// LocodeDBPath returns the path to the LOCODE database.
+func LocodeDBPath(c *config.Config) string {
+ return config.String(c.Sub(subsection), "locode_db_path")
+}
diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go
index 7b9adecf4..9af1dc038 100644
--- a/cmd/frostfs-node/config/node/config_test.go
+++ b/cmd/frostfs-node/config/node/config_test.go
@@ -29,12 +29,10 @@ func TestNodeSection(t *testing.T) {
)
attribute := Attributes(empty)
- relay := Relay(empty)
persisessionsPath := PersistentSessions(empty).Path()
persistatePath := PersistentState(empty).Path()
require.Empty(t, attribute)
- require.Equal(t, false, relay)
require.Equal(t, "", persisessionsPath)
require.Equal(t, PersistentStatePathDefault, persistatePath)
})
@@ -45,7 +43,6 @@ func TestNodeSection(t *testing.T) {
key := Key(c)
addrs := BootstrapAddresses(c)
attributes := Attributes(c)
- relay := Relay(c)
wKey := Wallet(c)
persisessionsPath := PersistentSessions(c).Path()
persistatePath := PersistentState(c).Path()
@@ -87,8 +84,6 @@ func TestNodeSection(t *testing.T) {
return false
})
- require.Equal(t, true, relay)
-
require.Len(t, attributes, 2)
require.Equal(t, "Price:11", attributes[0])
require.Equal(t, "UN-LOCODE:RU MSK", attributes[1])
diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go
index 876dc3ef1..c8c967d30 100644
--- a/cmd/frostfs-node/config/object/config.go
+++ b/cmd/frostfs-node/config/object/config.go
@@ -10,14 +10,17 @@ type PutConfig struct {
cfg *config.Config
}
+// GetConfig is a wrapper over "get" config section which provides access
+// to object get pipeline configuration of object service.
+type GetConfig struct {
+ cfg *config.Config
+}
+
const (
subsection = "object"
putSubsection = "put"
-
- // PutPoolSizeDefault is a default value of routine pool size to
- // process object.Put requests in object service.
- PutPoolSizeDefault = 10
+ getSubsection = "get"
)
// Put returns structure that provides access to "put" subsection of
@@ -28,31 +31,20 @@ func Put(c *config.Config) PutConfig {
}
}
-// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
-//
-// Returns PutPoolSizeDefault if the value is not a positive number.
-func (g PutConfig) PoolSizeRemote() int {
- v := config.Int(g.cfg, "remote_pool_size")
- if v > 0 {
- return int(v)
- }
-
- return PutPoolSizeDefault
-}
-
-// PoolSizeLocal returns the value of "local_pool_size" config parameter.
-//
-// Returns PutPoolSizeDefault if the value is not a positive number.
-func (g PutConfig) PoolSizeLocal() int {
- v := config.Int(g.cfg, "local_pool_size")
- if v > 0 {
- return int(v)
- }
-
- return PutPoolSizeDefault
-}
-
// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if it is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
}
+
+// Get returns structure that provides access to "get" subsection of
+// "object" section.
+func Get(c *config.Config) GetConfig {
+ return GetConfig{
+ c.Sub(subsection).Sub(getSubsection),
+ }
+}
+
+// Priority returns the value of "priority" config parameter.
+func (g GetConfig) Priority() []string {
+ return config.StringSliceSafe(g.cfg, "priority")
+}
diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go
index e2bb105d9..1c525ef55 100644
--- a/cmd/frostfs-node/config/object/config_test.go
+++ b/cmd/frostfs-node/config/object/config_test.go
@@ -13,8 +13,6 @@ func TestObjectSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
- require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
- require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
})
@@ -22,8 +20,6 @@ func TestObjectSection(t *testing.T) {
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
- require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
- require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
}
diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go
index 191694970..6c3e8adab 100644
--- a/cmd/frostfs-node/config/profiler/config.go
+++ b/cmd/frostfs-node/config/profiler/config.go
@@ -52,7 +52,7 @@ func Address(c *config.Config) string {
return AddressDefault
}
-// BlockRates returns the value of "block_rate" config parameter
+// BlockRate returns the value of "block_rate" config parameter
// from "pprof" section.
func BlockRate(c *config.Config) int {
s := c.Sub(subsection)
diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go
new file mode 100644
index 000000000..85f8180ed
--- /dev/null
+++ b/cmd/frostfs-node/config/qos/config.go
@@ -0,0 +1,46 @@
+package qos
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+const (
+ subsection = "qos"
+ criticalSubSection = "critical"
+ internalSubSection = "internal"
+)
+
+// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
+// parameter from "qos" section.
+//
+// Returns an empty list if not set.
+func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
+ return authorizedKeys(c, criticalSubSection)
+}
+
+// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
+// parameter from "qos" section.
+//
+// Returns an empty list if not set.
+func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
+ return authorizedKeys(c, internalSubSection)
+}
+
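+// authorizedKeys reads the "authorized_keys" parameter from the given
+// subsection of "qos" and parses each entry; it panics on a malformed key,
+// since that indicates a misconfiguration rather than a runtime error.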
+func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
+ strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
+ pubs := make(keys.PublicKeys, 0, len(strKeys))
+
+ for i := range strKeys {
+ pub, err := keys.NewPublicKeyFromString(strKeys[i])
+ if err != nil {
+ panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
+ }
+
+ pubs = append(pubs, pub)
+ }
+
+ return pubs
+}
diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go
new file mode 100644
index 000000000..b3b6019cc
--- /dev/null
+++ b/cmd/frostfs-node/config/qos/config_test.go
@@ -0,0 +1,40 @@
+package qos
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+func TestQoSSection(t *testing.T) {
+ t.Run("defaults", func(t *testing.T) {
+ empty := configtest.EmptyConfig()
+
+ require.Empty(t, CriticalAuthorizedKeys(empty))
+ require.Empty(t, InternalAuthorizedKeys(empty))
+ })
+
+ const path = "../../../../config/example/node"
+
+ criticalPubs := make(keys.PublicKeys, 2)
+ criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
+ criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
+
+ internalPubs := make(keys.PublicKeys, 2)
+ internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
+ internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
+
+ fileConfigTest := func(c *config.Config) {
+ require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
+ require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+}
diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go
index 0fbac935c..e954bf19d 100644
--- a/cmd/frostfs-node/config/replicator/config.go
+++ b/cmd/frostfs-node/config/replicator/config.go
@@ -11,6 +11,8 @@ const (
// PutTimeoutDefault is a default timeout of object put request in replicator.
PutTimeoutDefault = 5 * time.Second
+ // PoolSizeDefault is a default pool size for put requests in replicator.
+ PoolSizeDefault = 10
)
// PutTimeout returns the value of "put_timeout" config parameter
@@ -28,6 +30,13 @@ func PutTimeout(c *config.Config) time.Duration {
// PoolSize returns the value of "pool_size" config parameter
// from "replicator" section.
+//
+// Returns PoolSizeDefault if the value is not a positive integer.
func PoolSize(c *config.Config) int {
- return int(config.IntSafe(c.Sub(subsection), "pool_size"))
+ v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
+ if v > 0 {
+ return v
+ }
+
+ return PoolSizeDefault
}
diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go
index 2129c01b4..2aa490946 100644
--- a/cmd/frostfs-node/config/replicator/config_test.go
+++ b/cmd/frostfs-node/config/replicator/config_test.go
@@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
- require.Equal(t, 0, replicatorconfig.PoolSize(empty))
+ require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
})
const path = "../../../../config/example/node"
diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go
new file mode 100644
index 000000000..e0efdfde2
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/config.go
@@ -0,0 +1,42 @@
+package rpcconfig
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+)
+
+const (
+ subsection = "rpc"
+ limitsSubsection = "limits"
+)
+
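+// LimitConfig describes a single RPC limit: the gRPC methods it applies to
+// and the maximum number of simultaneous operations allowed on them.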
+type LimitConfig struct {
+ Methods []string
+ MaxOps int64
+}
+
+// Limits returns the "limits" config from "rpc" section.
+func Limits(c *config.Config) []LimitConfig {
+ c = c.Sub(subsection).Sub(limitsSubsection)
+
+ var limits []LimitConfig
+
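+ // Limits are declared as an indexed list of subsections ("0", "1", ...);
+ // iteration stops at the first index that has no "methods" entry.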
+ for i := uint64(0); ; i++ {
+ si := strconv.FormatUint(i, 10)
+ sc := c.Sub(si)
+
+ methods := config.StringSliceSafe(sc, "methods")
+ if len(methods) == 0 {
+ break
+ }
+
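+ // "max_ops" presence is checked explicitly: zero is a legal value,
+ // so a zero default from IntSafe could not signal a missing key.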
+ if sc.Value("max_ops") == nil {
+ panic("no max operations for method group")
+ }
+
+ limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
+ }
+
+ return limits
+}
diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go
new file mode 100644
index 000000000..a6365e19f
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/config_test.go
@@ -0,0 +1,77 @@
+package rpcconfig
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRPCSection(t *testing.T) {
+ t.Run("defaults", func(t *testing.T) {
+ require.Empty(t, Limits(configtest.EmptyConfig()))
+ })
+
+ t.Run("correct config", func(t *testing.T) {
+ const path = "../../../../config/example/node"
+
+ fileConfigTest := func(c *config.Config) {
+ limits := Limits(c)
+ require.Len(t, limits, 2)
+
+ limit0 := limits[0]
+ limit1 := limits[1]
+
+ require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
+ require.Equal(t, limit0.MaxOps, int64(1000))
+
+ require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
+ require.Equal(t, limit1.MaxOps, int64(10000))
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+ })
+
+ t.Run("no max operations", func(t *testing.T) {
+ const path = "testdata/no_max_ops"
+
+ fileConfigTest := func(c *config.Config) {
+ require.Panics(t, func() { _ = Limits(c) })
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+ })
+
+ t.Run("zero max operations", func(t *testing.T) {
+ const path = "testdata/zero_max_ops"
+
+ fileConfigTest := func(c *config.Config) {
+ limits := Limits(c)
+ require.Len(t, limits, 2)
+
+ limit0 := limits[0]
+ limit1 := limits[1]
+
+ require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
+ require.Equal(t, limit0.MaxOps, int64(0))
+
+ require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
+ require.Equal(t, limit1.MaxOps, int64(10000))
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+ })
+}
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env
new file mode 100644
index 000000000..2fed4c5bc
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env
@@ -0,0 +1,3 @@
+FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
+FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
+FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json
new file mode 100644
index 000000000..6156aa71d
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json
@@ -0,0 +1,18 @@
+{
+ "rpc": {
+ "limits": [
+ {
+ "methods": [
+ "/neo.fs.v2.object.ObjectService/PutSingle",
+ "/neo.fs.v2.object.ObjectService/Put"
+ ]
+ },
+ {
+ "methods": [
+ "/neo.fs.v2.object.ObjectService/Get"
+ ],
+ "max_ops": 10000
+ }
+ ]
+ }
+}
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml
new file mode 100644
index 000000000..e50b7ae93
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml
@@ -0,0 +1,8 @@
+rpc:
+ limits:
+ - methods:
+ - /neo.fs.v2.object.ObjectService/PutSingle
+ - /neo.fs.v2.object.ObjectService/Put
+ - methods:
+ - /neo.fs.v2.object.ObjectService/Get
+ max_ops: 10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env
new file mode 100644
index 000000000..ce7302b0b
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env
@@ -0,0 +1,4 @@
+FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
+FROSTFS_RPC_LIMITS_0_MAX_OPS=0
+FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
+FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json
new file mode 100644
index 000000000..16a1c173f
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json
@@ -0,0 +1,19 @@
+{
+ "rpc": {
+ "limits": [
+ {
+ "methods": [
+ "/neo.fs.v2.object.ObjectService/PutSingle",
+ "/neo.fs.v2.object.ObjectService/Put"
+ ],
+ "max_ops": 0
+ },
+ {
+ "methods": [
+ "/neo.fs.v2.object.ObjectService/Get"
+ ],
+ "max_ops": 10000
+ }
+ ]
+ }
+}
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml
new file mode 100644
index 000000000..525d768d4
--- /dev/null
+++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml
@@ -0,0 +1,9 @@
+rpc:
+ limits:
+ - methods:
+ - /neo.fs.v2.object.ObjectService/PutSingle
+ - /neo.fs.v2.object.ObjectService/Put
+ max_ops: 0
+ - methods:
+ - /neo.fs.v2.object.ObjectService/Get
+ max_ops: 10000
diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go
index a93d7e648..e98c032f0 100644
--- a/cmd/frostfs-node/config/test/config.go
+++ b/cmd/frostfs-node/config/test/config.go
@@ -11,8 +11,6 @@ import (
)
func fromFile(path string) *config.Config {
- os.Clearenv() // ENVs have priority over config files, so we do this in tests
-
return config.New(path, "", "")
}
@@ -40,15 +38,6 @@ func ForEachFileType(pref string, f func(*config.Config)) {
// ForEnvFileType creates config from `.env` file.
func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {
- envs := os.Environ()
- t.Cleanup(func() {
- os.Clearenv()
- for _, env := range envs {
- keyValue := strings.Split(env, "=")
- os.Setenv(keyValue[0], keyValue[1])
- }
- })
-
f(fromEnvFile(t, pref+".env"))
}
@@ -73,7 +62,6 @@ func loadEnv(t testing.TB, path string) {
v = strings.Trim(v, `"`)
- err = os.Setenv(k, v)
- require.NoError(t, err, "can't set environment variable")
+ t.Setenv(k, v)
}
}
diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go
index 8544c672c..91ef669ee 100644
--- a/cmd/frostfs-node/config/tracing/config.go
+++ b/cmd/frostfs-node/config/tracing/config.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"os"
+ "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
@@ -24,6 +25,7 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
Service: "frostfs-node",
InstanceID: getInstanceIDOrDefault(c),
Version: misc.Version,
+ Attributes: make(map[string]string),
}
if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" {
@@ -38,11 +40,30 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
}
conf.ServerCaCertPool = certPool
}
+
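+ // Tracing attributes are declared as an indexed list of subsections
+ // ("0", "1", ...); iteration stops at the first index without a key.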
+ for i := uint64(0); ; i++ {
+ si := strconv.FormatUint(i, 10)
+ ac := c.Sub(subsection).Sub("attributes").Sub(si)
+ k := config.StringSafe(ac, "key")
+ if k == "" {
+ break
+ }
+ v := config.StringSafe(ac, "value")
+ if v == "" {
+ return nil, fmt.Errorf("empty tracing attribute value for key %s", k)
+ }
+ if _, ok := conf.Attributes[k]; ok {
+ return nil, fmt.Errorf("tracing attribute key %s defined more than once", k)
+ }
+ conf.Attributes[k] = v
+ }
+
return conf, nil
}
func getInstanceIDOrDefault(c *config.Config) string {
- s := config.StringSlice(c.Sub("node"), "addresses")
+ s := config.StringSliceSafe(c.Sub("node"), "addresses")
if len(s) > 0 {
return s[0]
}
diff --git a/cmd/frostfs-node/config/tracing/config_test.go b/cmd/frostfs-node/config/tracing/config_test.go
new file mode 100644
index 000000000..8e485ca6e
--- /dev/null
+++ b/cmd/frostfs-node/config/tracing/config_test.go
@@ -0,0 +1,46 @@
+package tracing
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTracingSection(t *testing.T) {
+ t.Run("defaults", func(t *testing.T) {
+ tc, err := ToTracingConfig(configtest.EmptyConfig())
+ require.NoError(t, err)
+ require.Equal(t, false, tc.Enabled)
+ require.Equal(t, tracing.Exporter(""), tc.Exporter)
+ require.Equal(t, "", tc.Endpoint)
+ require.Equal(t, "frostfs-node", tc.Service)
+ require.Equal(t, "", tc.InstanceID)
+ require.Nil(t, tc.ServerCaCertPool)
+ require.Empty(t, tc.Attributes)
+ })
+
+ const path = "../../../../config/example/node"
+
+ fileConfigTest := func(c *config.Config) {
+ tc, err := ToTracingConfig(c)
+ require.NoError(t, err)
+ require.Equal(t, true, tc.Enabled)
+ require.Equal(t, tracing.OTLPgRPCExporter, tc.Exporter)
+ require.Equal(t, "localhost", tc.Endpoint)
+ require.Equal(t, "frostfs-node", tc.Service)
+ require.Nil(t, tc.ServerCaCertPool)
+ require.EqualValues(t, map[string]string{
+ "key0": "value",
+ "key1": "value",
+ }, tc.Attributes)
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+}
diff --git a/cmd/frostfs-node/config/tree/config.go b/cmd/frostfs-node/config/tree/config.go
index 8a8919999..da877791e 100644
--- a/cmd/frostfs-node/config/tree/config.go
+++ b/cmd/frostfs-node/config/tree/config.go
@@ -10,6 +10,8 @@ import (
const (
subsection = "tree"
+
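+ // SyncBatchSizeDefault is a default value of the "sync_batch_size" config parameter.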
+ SyncBatchSizeDefault = 1000
)
// TreeConfig is a wrapper over "tree" config section
@@ -74,6 +76,17 @@ func (c TreeConfig) SyncInterval() time.Duration {
return config.DurationSafe(c.cfg, "sync_interval")
}
+// SyncBatchSize returns the value of "sync_batch_size"
+// config parameter from the "tree" section.
+//
+// Returns SyncBatchSizeDefault if the config value is not specified.
+func (c TreeConfig) SyncBatchSize() int {
+ if v := config.IntSafe(c.cfg, "sync_batch_size"); v > 0 {
+ return int(v)
+ }
+ return SyncBatchSizeDefault
+}
+
// AuthorizedKeys parses and returns an array of "authorized_keys" config
// parameter from "tree" section.
//
diff --git a/cmd/frostfs-node/config/tree/config_test.go b/cmd/frostfs-node/config/tree/config_test.go
index 285ea0725..6628b8878 100644
--- a/cmd/frostfs-node/config/tree/config_test.go
+++ b/cmd/frostfs-node/config/tree/config_test.go
@@ -44,6 +44,7 @@ func TestTreeSection(t *testing.T) {
require.Equal(t, 32, treeSec.ReplicationWorkerCount())
require.Equal(t, 5*time.Second, treeSec.ReplicationTimeout())
require.Equal(t, time.Hour, treeSec.SyncInterval())
+ require.Equal(t, 2000, treeSec.SyncBatchSize())
require.Equal(t, expectedKeys, treeSec.AuthorizedKeys())
}
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 5a29aac76..bdb280d87 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -5,9 +5,10 @@ import (
"context"
"net"
- containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
+ containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
@@ -17,6 +18,7 @@ import (
containerTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/container/grpc"
containerService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
containerMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph"
+ containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
@@ -27,10 +29,10 @@ import (
func initContainerService(_ context.Context, c *cfg) {
// container wrapper that tries to invoke notary
// requests if chain is configured so
- wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
+ wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
fatalOnErr(err)
- c.shared.cnrClient = wrap
+ c.cnrClient = wrap
cnrSrc := cntClient.AsContainerSource(wrap)
@@ -41,11 +43,12 @@ func initContainerService(_ context.Context, c *cfg) {
fatalOnErr(err)
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
- if cacheSize > 0 {
- frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL)
+ if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
+ frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
}
- c.shared.frostfsidClient = frostfsIDSubjectProvider
+ c.frostfsidClient = frostfsIDSubjectProvider
+ c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
@@ -54,8 +57,10 @@ func initContainerService(_ context.Context, c *cfg) {
service := containerService.NewSignService(
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
- newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
- containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
+ newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
+ containerService.NewSplitterService(
+ c.cfgContainer.containerBatchSize, c.respSvc,
+ containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
),
)
service = containerService.NewAuditService(service, c.log, c.audit)
@@ -63,16 +68,15 @@ func initContainerService(_ context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
containerGRPC.RegisterContainerServiceServer(s, server)
+
+ // TODO(@aarifullin): #1487 remove the dual service support.
+ s.RegisterService(frostFSServiceDesc(containerGRPC.ContainerService_ServiceDesc), server)
})
c.cfgObject.cfgLocalStorage.localStorage.SetContainerSource(cnrRdr)
}
func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc containerCore.Source) (*morphContainerReader, *morphContainerWriter) {
- eACLFetcher := &morphEACLFetcher{
- w: client,
- }
-
cnrRdr := new(morphContainerReader)
cnrWrt := &morphContainerWriter{
@@ -80,57 +84,51 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
}
if c.cfgMorph.cacheTTL <= 0 {
- c.cfgObject.eaclSource = eACLFetcher
- cnrRdr.eacl = eACLFetcher
c.cfgObject.cnrSource = cnrSrc
cnrRdr.src = cnrSrc
cnrRdr.lister = client
} else {
// use RPC node as source of Container contract items (with caching)
- cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL)
- cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
+ c.cfgObject.cnrSource = cnrSrc
+ if c.cfgMorph.containerCacheSize > 0 {
+ containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
- subscribeToContainerCreation(c, func(e event.Event) {
- ev := e.(containerEvent.PutSuccess)
+ subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) {
+ ev := e.(containerEvent.PutSuccess)
- // read owner of the created container in order to update the reading cache.
- // TODO: use owner directly from the event after neofs-contract#256 will become resolved
- // but don't forget about the profit of reading the new container and caching it:
- // creation success are most commonly tracked by polling GET op.
- cnr, err := cnrSrc.Get(ev.ID)
- if err == nil {
- cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
- } else {
- // unlike removal, we expect successful receive of the container
- // after successful creation, so logging can be useful
- c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+ // read owner of the created container in order to update the reading cache.
+ // TODO: use owner directly from the event after neofs-contract#256 will become resolved
+ // but don't forget about the profit of reading the new container and caching it:
+ // creation success are most commonly tracked by polling GET op.
+ cnr, err := cnrSrc.Get(ctx, ev.ID)
+ if err == nil {
+ containerCache.containerCache.set(ev.ID, cnr, nil)
+ } else {
+ // unlike removal, we expect successful receive of the container
+ // after successful creation, so logging can be useful
+ c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+ zap.Stringer("id", ev.ID),
+ zap.Error(err),
+ )
+ }
+
+ c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt,
zap.Stringer("id", ev.ID),
- zap.Error(err),
)
- }
+ })
- c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
-
- subscribeToContainerRemoval(c, func(e event.Event) {
- ev := e.(containerEvent.DeleteSuccess)
- cachedContainerStorage.handleRemoval(ev.ID)
- c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
-
- c.cfgObject.eaclSource = cachedEACLStorage
- c.cfgObject.cnrSource = cachedContainerStorage
+ subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
+ ev := e.(containerEvent.DeleteSuccess)
+ containerCache.handleRemoval(ev.ID)
+ c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt,
+ zap.Stringer("id", ev.ID),
+ )
+ })
+ c.cfgObject.cnrSource = containerCache
+ }
cnrRdr.lister = client
- cnrRdr.eacl = c.cfgObject.eaclSource
cnrRdr.src = c.cfgObject.cnrSource
-
- cnrWrt.cacheEnabled = true
- cnrWrt.eacls = cachedEACLStorage
}
return cnrRdr, cnrWrt
@@ -220,42 +218,38 @@ func (c *cfg) ExternalAddresses() []string {
// implements interface required by container service provided by morph executor.
type morphContainerReader struct {
- eacl containerCore.EACLSource
-
src containerCore.Source
lister interface {
- ContainersOf(*user.ID) ([]cid.ID, error)
+ ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
+ IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
}
}
-func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
- return x.src.Get(id)
+func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) {
+ return x.src.Get(ctx, id)
}
-func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
- return x.src.DeletionInfo(id)
+func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) {
+ return x.src.DeletionInfo(ctx, id)
}
-func (x *morphContainerReader) GetEACL(id cid.ID) (*containerCore.EACL, error) {
- return x.eacl.GetEACL(id)
+func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) {
+ return x.lister.ContainersOf(ctx, id)
}
-func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
- return x.lister.ContainersOf(id)
+func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error {
+ return x.lister.IterateContainersOf(ctx, id, processCID)
}
type morphContainerWriter struct {
neoClient *cntClient.Client
-
- cacheEnabled bool
- eacls ttlEACLStorage
}
-func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
- return cntClient.Put(m.neoClient, cnr)
+func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) {
+ return cntClient.Put(ctx, m.neoClient, cnr)
}
-func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error {
- return cntClient.Delete(m.neoClient, witness)
+func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error {
+ return cntClient.Delete(ctx, m.neoClient, witness)
}
diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go
index e1e6e3ac9..1825013c7 100644
--- a/cmd/frostfs-node/control.go
+++ b/cmd/frostfs-node/control.go
@@ -7,16 +7,19 @@ import (
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
+ tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"go.uber.org/zap"
"google.golang.org/grpc"
)
const serviceNameControl = "control"
-func initControlService(c *cfg) {
+func initControlService(ctx context.Context, c *cfg) {
endpoint := controlconfig.GRPC(c.appCfg).Endpoint()
if endpoint == controlconfig.GRPCEndpointDefault {
return
@@ -46,21 +49,28 @@ func initControlService(c *cfg) {
lis, err := net.Listen("tcp", endpoint)
if err != nil {
- c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}
- c.cfgControlService.server = grpc.NewServer()
+ c.cfgControlService.server = grpc.NewServer(
+ grpc.ChainUnaryInterceptor(
+ qos.NewSetCriticalIOTagUnaryServerInterceptor(),
+ metrics.NewUnaryServerInterceptor(),
+ tracing.NewUnaryServerInterceptor(),
+ ),
+ // control service has no stream methods, so no stream interceptors are added
+ )
c.onShutdown(func() {
- stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log)
+ stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
})
control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
- c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", serviceNameControl),
zap.String("endpoint", endpoint))
fatalOnErr(c.cfgControlService.server.Serve(lis))
@@ -72,23 +82,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {
return c.cfgNetmap.state.controlNetmapStatus()
}
-func (c *cfg) setHealthStatus(st control.HealthStatus) {
- c.notifySystemd(st)
+func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) {
+ c.notifySystemd(ctx, st)
c.healthStatus.Store(int32(st))
c.metricsCollector.State().SetHealth(int32(st))
}
-func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
+func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
- c.notifySystemd(newSt)
+ c.notifySystemd(ctx, newSt)
c.metricsCollector.State().SetHealth(int32(newSt))
}
return
}
-func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) {
+func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) {
old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
- c.notifySystemd(st)
+ c.notifySystemd(ctx, st)
c.metricsCollector.State().SetHealth(int32(st))
return
}
@@ -97,7 +107,7 @@ func (c *cfg) HealthStatus() control.HealthStatus {
return control.HealthStatus(c.healthStatus.Load())
}
-func (c *cfg) notifySystemd(st control.HealthStatus) {
+func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
if !c.sdNotify {
return
}
@@ -113,6 +123,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
- c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go
index fb55a6019..d2d4e9785 100644
--- a/cmd/frostfs-node/frostfsid.go
+++ b/cmd/frostfs-node/frostfsid.go
@@ -1,6 +1,8 @@
package main
import (
+ "context"
+ "strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
@@ -9,57 +11,101 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
)
+type subjectWithError struct {
+ subject *client.Subject
+ err error
+}
+
+type subjectExtWithError struct {
+ subject *client.SubjectExtended
+ err error
+}
+
type morphFrostfsIDCache struct {
subjProvider frostfsidcore.SubjectProvider
- subjCache *expirable.LRU[util.Uint160, *client.Subject]
+ subjCache *expirable.LRU[util.Uint160, subjectWithError]
- subjExtCache *expirable.LRU[util.Uint160, *client.SubjectExtended]
+ subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError]
+
+ metrics cacheMetrics
}
-func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration) frostfsidcore.SubjectProvider {
+func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider {
return &morphFrostfsIDCache{
subjProvider: subjProvider,
- subjCache: expirable.NewLRU(size, func(util.Uint160, *client.Subject) {}, ttl),
+ subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, ttl),
- subjExtCache: expirable.NewLRU(size, func(util.Uint160, *client.SubjectExtended) {}, ttl),
+ subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl),
+
+ metrics: metrics,
}
}
-func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
+func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
+ hit := false
+ startedAt := time.Now()
+ defer func() {
+ m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit)
+ }()
+
result, found := m.subjCache.Get(addr)
if found {
- return result, nil
+ hit = true
+ return result.subject, result.err
}
- result, err := m.subjProvider.GetSubject(addr)
+ subj, err := m.subjProvider.GetSubject(ctx, addr)
if err != nil {
+ if m.isCacheableError(err) {
+ m.subjCache.Add(addr, subjectWithError{
+ err: err,
+ })
+ }
return nil, err
}
- m.subjCache.Add(addr, result)
- return result, nil
+ m.subjCache.Add(addr, subjectWithError{subject: subj})
+ return subj, nil
}
-func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
- subjExt, found := m.subjExtCache.Get(addr)
+func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
+ hit := false
+ startedAt := time.Now()
+ defer func() {
+ m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit)
+ }()
+
+ result, found := m.subjExtCache.Get(addr)
if found {
- return subjExt, nil
+ hit = true
+ return result.subject, result.err
}
- var err error
- subjExt, err = m.subjProvider.GetSubjectExtended(addr)
+ subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr)
if err != nil {
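+ // A cacheable (subject not found) error poisons both caches: a plain
+ // subject lookup for the same address would fail with the same error.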
+ if m.isCacheableError(err) {
+ m.subjExtCache.Add(addr, subjectExtWithError{
+ err: err,
+ })
+ m.subjCache.Add(addr, subjectWithError{
+ err: err,
+ })
+ }
return nil, err
}
- m.subjExtCache.Add(addr, subjExt)
- m.subjCache.Add(addr, subjectFromSubjectExtended(subjExt))
+ m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt})
+ m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)})
return subjExt, nil
}
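+
+// isCacheableError reports whether err is a deterministic "subject not
+// found" response that is safe to cache, as opposed to a transient RPC
+// failure.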
+func (m *morphFrostfsIDCache) isCacheableError(err error) bool {
+ return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage)
+}
+
func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject {
return &client.Subject{
PrimaryKey: subjExt.PrimaryKey,
diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go
index 3a38b2cca..6b6d44750 100644
--- a/cmd/frostfs-node/grpc.go
+++ b/cmd/frostfs-node/grpc.go
@@ -1,16 +1,22 @@
package main
import (
+ "context"
"crypto/tls"
"errors"
+ "fmt"
"net"
"time"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
+ rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
+ qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@@ -18,11 +24,11 @@ import (
const maxRecvMsgSize = 256 << 20
-func initGRPC(c *cfg) {
+func initGRPC(ctx context.Context, c *cfg) {
var endpointsToReconnect []string
var successCount int
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
- serverOpts, ok := getGrpcServerOpts(c, sc)
+ serverOpts, ok := getGrpcServerOpts(ctx, c, sc)
if !ok {
return
}
@@ -30,7 +36,7 @@ func initGRPC(c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
- c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
return
}
@@ -39,7 +45,7 @@ func initGRPC(c *cfg) {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
- stopGRPC("FrostFS Public API", srv, c.log)
+ stopGRPC(ctx, "FrostFS Public API", srv, c.log)
})
c.cfgGRPC.append(sc.Endpoint(), lis, srv)
@@ -52,11 +58,11 @@ func initGRPC(c *cfg) {
c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg)
for _, endpoint := range endpointsToReconnect {
- scheduleReconnect(endpoint, c)
+ scheduleReconnect(ctx, endpoint, c)
}
}
-func scheduleReconnect(endpoint string, c *cfg) {
+func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
c.wg.Add(1)
go func() {
defer c.wg.Done()
@@ -65,7 +71,7 @@ func scheduleReconnect(endpoint string, c *cfg) {
for {
select {
case <-t.C:
- if tryReconnect(endpoint, c) {
+ if tryReconnect(ctx, endpoint, c) {
return
}
case <-c.done:
@@ -75,20 +81,20 @@ func scheduleReconnect(endpoint string, c *cfg) {
}()
}
-func tryReconnect(endpoint string, c *cfg) bool {
- c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
+func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
+ c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
- serverOpts, found := getGRPCEndpointOpts(endpoint, c)
+ serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c)
if !found {
- c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
+ c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
return true
}
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
- c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
- c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
+ c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+ c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
return false
}
c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@@ -96,16 +102,16 @@ func tryReconnect(endpoint string, c *cfg) bool {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
- stopGRPC("FrostFS Public API", srv, c.log)
+ stopGRPC(ctx, "FrostFS Public API", srv, c.log)
})
c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
- c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
+ c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
return true
}
-func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
+func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
unlock := c.LockAppConfigShared()
defer unlock()
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
@@ -116,7 +122,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
return
}
var ok bool
- result, ok = getGrpcServerOpts(c, sc)
+ result, ok = getGrpcServerOpts(ctx, c, sc)
if !ok {
return
}
@@ -125,16 +131,20 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
return
}
-func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
+func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
serverOpts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxRecvMsgSize),
grpc.ChainUnaryInterceptor(
+ qos.NewUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(),
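+ // The limiter is resolved through a closure, so a limiter stored
+ // after server construction (e.g. on config reload) is still picked up.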
+ qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
grpc.ChainStreamInterceptor(
+ qos.NewStreamServerInterceptor(),
metrics.NewStreamServerInterceptor(),
tracing.NewStreamServerInterceptor(),
+ qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
}
@@ -143,7 +153,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
if tlsCfg != nil {
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
if err != nil {
- c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
return nil, false
}
@@ -174,38 +184,38 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
return serverOpts, true
}
-func serveGRPC(c *cfg) {
+func serveGRPC(ctx context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) {
c.wg.Add(1)
go func() {
defer func() {
- c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
+ c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.Stringer("endpoint", l.Addr()),
)
c.wg.Done()
}()
- c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", "gRPC"),
zap.Stringer("endpoint", l.Addr()),
)
if err := s.Serve(l); err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
- c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err))
c.cfgGRPC.dropConnection(e)
- scheduleReconnect(e, c)
+ scheduleReconnect(ctx, e, c)
}
}()
})
}
-func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
- l = &logger.Logger{Logger: l.With(zap.String("name", name))}
+func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
+ l = l.With(zap.String("name", name))
- l.Info(logs.FrostFSNodeStoppingGRPCServer)
+ l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@@ -217,9 +227,60 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
select {
case <-done:
case <-time.After(1 * time.Minute):
- l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
+ l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
- l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
+ l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
+}
+
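+// initRPCLimiter builds a semaphore-based limiter from the "rpc.limits"
+// config section and stores it atomically for the gRPC interceptors;
+// it fails if any configured method is not registered on a gRPC server.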
+func initRPCLimiter(c *cfg) error {
+ var limits []limiting.KeyLimit
+ for _, l := range rpcconfig.Limits(c.appCfg) {
+ limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
+ }
+
+ if err := validateRPCLimits(c, limits); err != nil {
+ return fmt.Errorf("validate RPC limits: %w", err)
+ }
+
+ limiter, err := limiting.NewSemaphoreLimiter(limits)
+ if err != nil {
+ return fmt.Errorf("create RPC limiter: %w", err)
+ }
+
+ c.cfgGRPC.limiter.Store(limiter)
+ return nil
+}
+
+func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
+ availableMethods := getAvailableMethods(c.cfgGRPC.servers)
+ for _, limit := range limits {
+ for _, method := range limit.Keys {
+ if _, ok := availableMethods[method]; !ok {
+ return fmt.Errorf("set limit on an unknown method %q", method)
+ }
+ }
+ }
+ return nil
+}
+
+func getAvailableMethods(servers []grpcServer) map[string]struct{} {
+ res := make(map[string]struct{})
+ for _, server := range servers {
+ for _, method := range getMethodsForServer(server.Server) {
+ res[method] = struct{}{}
+ }
+ }
+ return res
+}
+
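+// getMethodsForServer returns the registered methods in gRPC full-method
+// form ("/package.Service/Method"), the same form used as limiter keys.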
+func getMethodsForServer(server *grpc.Server) []string {
+ var res []string
+ for service, info := range server.GetServiceInfo() {
+ for _, method := range info.Methods {
+ res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
+ }
+ }
+ return res
}
diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go
index 2ec20d848..7346206ef 100644
--- a/cmd/frostfs-node/httpcomponent.go
+++ b/cmd/frostfs-node/httpcomponent.go
@@ -20,9 +20,9 @@ type httpComponent struct {
preReload func(c *cfg)
}
-func (cmp *httpComponent) init(c *cfg) {
+func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
if !cmp.enabled {
- c.log.Info(cmp.name + " is disabled")
+ c.log.Info(ctx, cmp.name+" is disabled")
return
}
// Init server with parameters
@@ -39,14 +39,14 @@ func (cmp *httpComponent) init(c *cfg) {
go func() {
defer c.wg.Done()
- c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", cmp.name),
zap.String("endpoint", cmp.address))
fatalOnErr(srv.Serve())
}()
c.closers = append(c.closers, closer{
cmp.name,
- func() { stopAndLog(c, cmp.name, srv.Shutdown) },
+ func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) },
})
}
@@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error {
// Cleanup
delCloser(cmp.cfg, cmp.name)
// Init server with new parameters
- cmp.init(cmp.cfg)
+ cmp.init(ctx, cmp.cfg)
// Start worker
if cmp.enabled {
startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index e4f0a434c..0228d2a10 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -61,21 +61,21 @@ func main() {
var ctx context.Context
ctx, c.ctxCancel = context.WithCancel(context.Background())
- c.setHealthStatus(control.HealthStatus_STARTING)
+ c.setHealthStatus(ctx, control.HealthStatus_STARTING)
initApp(ctx, c)
bootUp(ctx, c)
- c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY)
+ c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)
wait(c)
}
-func initAndLog(c *cfg, name string, initializer func(*cfg)) {
- c.log.Info(fmt.Sprintf("initializing %s service...", name))
+func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
+ c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
initializer(c)
- c.log.Info(name + " service has been successfully initialized")
+ c.log.Info(ctx, name+" service has been successfully initialized")
}
func initApp(ctx context.Context, c *cfg) {
@@ -85,72 +85,75 @@ func initApp(ctx context.Context, c *cfg) {
c.wg.Done()
}()
- setRuntimeParameters(c)
+ setRuntimeParameters(ctx, c)
metrics, _ := metricsComponent(c)
- initAndLog(c, "profiler", initProfilerService)
- initAndLog(c, metrics.name, metrics.init)
+ initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
+ initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })
- initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
+ initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initLocalStorage(ctx, c)
- initAndLog(c, "storage engine", func(c *cfg) {
+ initAndLog(ctx, c, "storage engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
})
- initAndLog(c, "gRPC", initGRPC)
- initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
+ initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
+ initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
+ initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })
initAccessPolicyEngine(ctx, c)
- initAndLog(c, "access policy engine", func(c *cfg) {
+ initAndLog(ctx, c, "access policy engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
})
- initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
- initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
- initAndLog(c, "session", initSessionService)
- initAndLog(c, "object", initObjectService)
- initAndLog(c, "tree", initTreeService)
- initAndLog(c, "apemanager", initAPEManagerService)
- initAndLog(c, "control", initControlService)
+ initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
+ initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
+ initAndLog(ctx, c, "session", initSessionService)
+ initAndLog(ctx, c, "object", initObjectService)
+ initAndLog(ctx, c, "tree", initTreeService)
+ initAndLog(ctx, c, "apemanager", initAPEManagerService)
+ initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
- initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
+ initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
+
+ initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
- c.log.Info(fmt.Sprintf("starting %s service...", name))
+ c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
starter(ctx, c)
if logSuccess {
- c.log.Info(name + " service started successfully")
+ c.log.Info(ctx, name+" service started successfully")
}
}
-func stopAndLog(c *cfg, name string, stopper func() error) {
- c.log.Debug(fmt.Sprintf("shutting down %s service", name))
+func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
+ c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))
- err := stopper()
+ err := stopper(ctx)
if err != nil {
- c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
- zap.String("error", err.Error()),
+ c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
+ zap.Error(err),
)
}
- c.log.Debug(name + " service has been stopped")
+ c.log.Debug(ctx, name+" service has been stopped")
}
func bootUp(ctx context.Context, c *cfg) {
- runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
+ runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)
- bootstrapNode(c)
+ bootstrapNode(ctx, c)
startWorkers(ctx, c)
}
func wait(c *cfg) {
- c.log.Info(logs.CommonApplicationStarted,
+ c.log.Info(context.Background(), logs.CommonApplicationStarted,
zap.String("version", misc.Version))
<-c.done // graceful shutdown
@@ -160,12 +163,12 @@ func wait(c *cfg) {
go func() {
defer drain.Done()
for err := range c.internalErr {
- c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+ c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
}
}()
- c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
+ c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
c.wg.Wait()
diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go
index 19b4af51f..d9ca01e70 100644
--- a/cmd/frostfs-node/metrics.go
+++ b/cmd/frostfs-node/metrics.go
@@ -8,38 +8,38 @@ import (
func metricsComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
- if c.dynamicConfiguration.metrics == nil {
- c.dynamicConfiguration.metrics = new(httpComponent)
- c.dynamicConfiguration.metrics.cfg = c
- c.dynamicConfiguration.metrics.name = "metrics"
- c.dynamicConfiguration.metrics.handler = metrics.Handler()
+ if c.metrics == nil {
+ c.metrics = new(httpComponent)
+ c.metrics.cfg = c
+ c.metrics.name = "metrics"
+ c.metrics.handler = metrics.Handler()
updated = true
}
// (re)init read configuration
enabled := metricsconfig.Enabled(c.appCfg)
- if enabled != c.dynamicConfiguration.metrics.enabled {
- c.dynamicConfiguration.metrics.enabled = enabled
+ if enabled != c.metrics.enabled {
+ c.metrics.enabled = enabled
updated = true
}
address := metricsconfig.Address(c.appCfg)
- if address != c.dynamicConfiguration.metrics.address {
- c.dynamicConfiguration.metrics.address = address
+ if address != c.metrics.address {
+ c.metrics.address = address
updated = true
}
dur := metricsconfig.ShutdownTimeout(c.appCfg)
- if dur != c.dynamicConfiguration.metrics.shutdownDur {
- c.dynamicConfiguration.metrics.shutdownDur = dur
+ if dur != c.metrics.shutdownDur {
+ c.metrics.shutdownDur = dur
updated = true
}
- return c.dynamicConfiguration.metrics, updated
+ return c.metrics, updated
}
func enableMetricsSvc(c *cfg) {
- c.shared.metricsSvc.Enable()
+ c.metricsSvc.Enable()
}
func disableMetricsSvc(c *cfg) {
- c.shared.metricsSvc.Disable()
+ c.metricsSvc.Disable()
}
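
The metrics refactor drops the dynamicConfiguration wrapper and keeps the lazily allocated component directly on cfg; the function still reports whether any observed setting changed so the reload path knows to restart the HTTP server. A compact sketch of this (component, updated) re-init pattern with stand-in types:

    package main

    import "fmt"

    // httpComponent stands in for the node's reloadable HTTP server wrapper.
    type httpComponent struct {
        name    string
        enabled bool
        address string
    }

    type cfg struct {
        metrics *httpComponent
        // appEnabled and appAddress stand in for values read from appCfg.
        appEnabled bool
        appAddress string
    }

    // metricsComponent lazily allocates the component, then copies every
    // (re)read setting over, reporting whether anything actually changed.
    func metricsComponent(c *cfg) (*httpComponent, bool) {
        updated := false
        if c.metrics == nil {
            c.metrics = &httpComponent{name: "metrics"}
            updated = true
        }
        if c.appEnabled != c.metrics.enabled {
            c.metrics.enabled = c.appEnabled
            updated = true
        }
        if c.appAddress != c.metrics.address {
            c.metrics.address = c.appAddress
            updated = true
        }
        return c.metrics, updated
    }

    func main() {
        c := &cfg{appEnabled: true, appAddress: ":9090"}
        _, updated := metricsComponent(c)
        fmt.Println("restart needed:", updated) // true on first call
    }
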
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 7178cd97d..917cf6fc0 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -14,6 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -23,12 +24,56 @@ import (
const (
newEpochNotification = "NewEpoch"
-
- // amount of tries(blocks) before notary deposit timeout.
- notaryDepositRetriesAmount = 300
)
-func initMorphComponents(ctx context.Context, c *cfg) {
+func (c *cfg) initMorphComponents(ctx context.Context) {
+ c.cfgMorph.guard.Lock()
+ defer c.cfgMorph.guard.Unlock()
+ if c.cfgMorph.initialized {
+ return
+ }
+ initMorphClient(ctx, c)
+
+ lookupScriptHashesInNNS(c) // smart contract auto negotiation
+
+ err := c.cfgMorph.client.EnableNotarySupport(
+ client.WithProxyContract(
+ c.cfgMorph.proxyScriptHash,
+ ),
+ )
+ fatalOnErr(err)
+
+ c.log.Info(ctx, logs.FrostFSNodeNotarySupport)
+
+ wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
+ fatalOnErr(err)
+
+ var netmapSource netmap.Source
+
+ c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg)
+ c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
+
+ if c.cfgMorph.cacheTTL == 0 {
+ msPerBlock, err := c.cfgMorph.client.MsPerBlock()
+ fatalOnErr(err)
+ c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
+ c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
+ }
+
+ if c.cfgMorph.cacheTTL < 0 {
+ netmapSource = newRawNetmapStorage(wrap)
+ } else {
+ // use RPC node as source of netmap (with caching)
+ netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg,
+ morphconfig.NetmapCandidatesPollInterval(c.appCfg))
+ }
+
+ c.netMapSource = netmapSource
+ c.cfgNetmap.wrapper = wrap
+ c.cfgMorph.initialized = true
+}
+
+func initMorphClient(ctx context.Context, c *cfg) {
addresses := morphconfig.RPCEndpoint(c.appCfg)
// Morph client stable-sorts endpoints by priority. Shuffle here to randomize
@@ -40,7 +85,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
cli, err := client.New(ctx,
c.key,
client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)),
- client.WithLogger(c.log),
+ client.WithLogger(c.log.WithTag(logger.TagMorph)),
client.WithMetrics(c.metricsCollector.MorphClientMetrics()),
client.WithEndpoints(addresses...),
client.WithConnLostCallback(func() {
@@ -48,90 +93,46 @@ func initMorphComponents(ctx context.Context, c *cfg) {
}),
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()),
+ client.WithDialerSource(c.dialerSource),
)
if err != nil {
- c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
+ c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
fatalOnErr(err)
}
c.onShutdown(func() {
- c.log.Info(logs.FrostFSNodeClosingMorphComponents)
+ c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})
if err := cli.SetGroupSignerScope(); err != nil {
- c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
+ c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}
c.cfgMorph.client = cli
- c.cfgMorph.notaryEnabled = cli.ProbeNotary()
-
- lookupScriptHashesInNNS(c) // smart contract auto negotiation
-
- if c.cfgMorph.notaryEnabled {
- err = c.cfgMorph.client.EnableNotarySupport(
- client.WithProxyContract(
- c.cfgMorph.proxyScriptHash,
- ),
- )
- fatalOnErr(err)
- }
-
- c.log.Info(logs.FrostFSNodeNotarySupport,
- zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
- )
-
- wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
- fatalOnErr(err)
-
- var netmapSource netmap.Source
-
- c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
-
- if c.cfgMorph.cacheTTL == 0 {
- msPerBlock, err := c.cfgMorph.client.MsPerBlock()
- fatalOnErr(err)
- c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
- c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
- }
-
- if c.cfgMorph.cacheTTL < 0 {
- netmapSource = wrap
- } else {
- // use RPC node as source of netmap (with caching)
- netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap)
- }
-
- c.netMapSource = netmapSource
- c.cfgNetmap.wrapper = wrap
}
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
- // skip notary deposit in non-notary environments
- if !c.cfgMorph.notaryEnabled {
- return
- }
-
- tx, err := makeNotaryDeposit(c)
+ tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err)
if tx.Equals(util.Uint256{}) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
 // need to wait for it.
- c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
+ c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}
- err = waitNotaryDeposit(ctx, c, tx)
+ err = waitNotaryDeposit(ctx, c, tx, vub)
fatalOnErr(err)
}
-func makeNotaryDeposit(c *cfg) (util.Uint256, error) {
+func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) {
const (
// gasMultiplier defines how many times more the notary
// balance must be compared to the GAS balance of the node:
@@ -145,43 +146,19 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, error) {
depositAmount, err := client.CalculateNotaryDepositAmount(c.cfgMorph.client, gasMultiplier, gasDivisor)
if err != nil {
- return util.Uint256{}, fmt.Errorf("could not calculate notary deposit: %w", err)
+ return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err)
}
- return c.cfgMorph.client.DepositEndlessNotary(depositAmount)
+ return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount)
}
-var (
- errNotaryDepositFail = errors.New("notary deposit tx has faulted")
- errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network")
-)
-
-func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
- for range notaryDepositRetriesAmount {
- c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted)
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- ok, err := c.cfgMorph.client.TxHalt(tx)
- if err == nil {
- if ok {
- c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
- return nil
- }
-
- return errNotaryDepositFail
- }
-
- err = c.cfgMorph.client.Wait(ctx, 1)
- if err != nil {
- return fmt.Errorf("could not wait for one block in chain: %w", err)
- }
+func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
+ if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
+ return err
}
- return errNotaryDepositTimeout
+ c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
+ return nil
}
func listenMorphNotifications(ctx context.Context, c *cfg) {
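
waitNotaryDeposit is reduced to a single WaitTxHalt call: instead of a fixed retry budget (the deleted notaryDepositRetriesAmount), the wait is bounded by the transaction's ValidUntilBlock (vub), past which the transaction can never be accepted. A rough sketch of a VUB-bounded wait, assuming a hypothetical chain interface in place of the Morph client:

    package sketch

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // chain is a hypothetical stand-in for the Morph client queries the
    // wait needs; it is not the real client interface.
    type chain interface {
        BlockCount(ctx context.Context) (uint32, error)
        TxHalt(ctx context.Context, tx [32]byte) (halted, found bool, err error)
    }

    // waitTxHalt polls until the transaction is persisted with HALT state,
    // or the chain height passes ValidUntilBlock — after which the tx can
    // never be accepted, giving the wait a natural height-based bound.
    func waitTxHalt(ctx context.Context, c chain, vub uint32, tx [32]byte) error {
        for {
            if halted, found, err := c.TxHalt(ctx, tx); err == nil && found {
                if !halted {
                    return errors.New("transaction faulted")
                }
                return nil
            }
            h, err := c.BlockCount(ctx)
            if err != nil {
                return fmt.Errorf("get block count: %w", err)
            }
            if h > vub {
                return errors.New("transaction expired past ValidUntilBlock")
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(time.Second): // roughly one block interval
            }
        }
    }
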
@@ -189,22 +166,23 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
err error
subs subscriber.Subscriber
)
+ log := c.log.WithTag(logger.TagMorph)
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
+ c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
- Log: c.log,
+ Log: log,
StartFromBlock: fromSideChainBlock,
Client: c.cfgMorph.client,
})
fatalOnErr(err)
lis, err := event.NewListener(event.ListenerParams{
- Logger: c.log,
+ Logger: log,
Subscriber: subs,
})
fatalOnErr(err)
@@ -222,7 +200,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
- c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
+ log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}
@@ -232,12 +210,12 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers)
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
- registerBlockHandler(lis, func(block *block.Block) {
- c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
+ registerBlockHandler(lis, func(ctx context.Context, block *block.Block) {
+ log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
- c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
+ log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}
@@ -248,27 +226,17 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
subs map[event.Type][]event.Handler,
) {
for typ, handlers := range subs {
- pi := event.NotificationParserInfo{}
- pi.SetType(typ)
- pi.SetScriptHash(scHash)
-
p, ok := parsers[typ]
if !ok {
panic(fmt.Sprintf("missing parser for event %s", typ))
}
- pi.SetParser(p)
-
- lis.SetNotificationParser(pi)
-
- for _, h := range handlers {
- hi := event.NotificationHandlerInfo{}
- hi.SetType(typ)
- hi.SetScriptHash(scHash)
- hi.SetHandler(h)
-
- lis.RegisterNotificationHandler(hi)
- }
+ lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
+ Contract: scHash,
+ Type: typ,
+ Parser: p,
+ Handlers: handlers,
+ })
}
}
@@ -297,10 +265,6 @@ func lookupScriptHashesInNNS(c *cfg) {
)
for _, t := range targets {
- if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
- continue // ignore proxy contract if notary disabled
- }
-
if emptyHash.Equals(*t.h) {
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
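
initMorphComponents also becomes a method guarded by a mutex and an initialized flag, so its several call sites can invoke it in any order and the Morph bootstrap runs exactly once. A minimal sketch of that guard, with a stand-in connection type:

    package sketch

    import "sync"

    // conn stands in for the Morph RPC client created by the guarded init.
    type conn struct{}

    type morphState struct {
        guard       sync.Mutex
        initialized bool
        client      *conn
    }

    // initOnce makes the bootstrap idempotent: the first caller connects,
    // later or concurrent callers return immediately. The flag is set only
    // after the whole sequence succeeds, so a failed attempt stays
    // retryable — unlike sync.Once, which marks itself done unconditionally.
    func (m *morphState) initOnce(connect func() (*conn, error)) error {
        m.guard.Lock()
        defer m.guard.Unlock()
        if m.initialized {
            return nil
        }
        c, err := connect()
        if err != nil {
            return err
        }
        m.client = c
        m.initialized = true
        return nil
    }
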
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 8104b1dc1..7dfb4fe12 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -8,7 +8,7 @@ import (
"net"
"sync/atomic"
- netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -19,6 +19,7 @@ import (
netmapTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/netmap/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
+ netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"go.uber.org/zap"
@@ -61,13 +62,15 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
if ni != nil {
s.nodeInfo.Store(*ni)
- switch {
- case ni.IsOnline():
+ switch ni.Status() {
+ case netmapSDK.Online:
ctrlNetSt = control.NetmapStatus_ONLINE
- case ni.IsOffline():
+ case netmapSDK.Offline:
ctrlNetSt = control.NetmapStatus_OFFLINE
- case ni.IsMaintenance():
+ case netmapSDK.Maintenance:
ctrlNetSt = control.NetmapStatus_MAINTENANCE
+ case netmapSDK.UnspecifiedState:
+ ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED
}
} else {
ctrlNetSt = control.NetmapStatus_OFFLINE
@@ -78,13 +81,13 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
// nil ni means that the node is not included
// in the netmap
- niOld.SetOffline()
+ niOld.SetStatus(netmapSDK.Offline)
s.nodeInfo.Store(niOld)
}
}
- s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
+ s.setControlNetmapStatus(ctrlNetSt)
}
// sets the current node state to the given value. Subsequent cfg.bootstrap
@@ -102,9 +105,7 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) {
v := s.nodeInfo.Load()
if v != nil {
res, ok = v.(netmapSDK.NodeInfo)
- if !ok {
- panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v))
- }
+ assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v))
}
return
@@ -122,7 +123,11 @@ func nodeKeyFromNetmap(c *cfg) []byte {
func (c *cfg) iterateNetworkAddresses(f func(string) bool) {
ni, ok := c.cfgNetmap.state.getNodeInfo()
if ok {
- ni.IterateNetworkEndpoints(f)
+ for s := range ni.NetworkEndpoints() {
+ if f(s) {
+ return
+ }
+ }
}
}
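
iterateNetworkAddresses switches from the callback-style IterateNetworkEndpoints to ranging over NetworkEndpoints(), which the diff treats as a Go 1.23 range-over-func iterator; the early return preserves the old stop-on-true contract. A small self-contained sketch of that shape (iter.Seq[string] is an assumption about the SDK's return type):

    package sketch

    import "iter"

    // endpoints yields addresses as an iter.Seq[string], the shape the
    // diff assumes ni.NetworkEndpoints() now has.
    func endpoints(list []string) iter.Seq[string] {
        return func(yield func(string) bool) {
            for _, e := range list {
                if !yield(e) {
                    return
                }
            }
        }
    }

    // iterate keeps the old callback contract: f returning true stops the
    // walk, which with range-over-func is just an early return.
    func iterate(list []string, f func(string) bool) {
        for s := range endpoints(list) {
            if f(s) {
                return
            }
        }
    }
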
@@ -139,13 +144,11 @@ func initNetmapService(ctx context.Context, c *cfg) {
network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo)
c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes())
parseAttributes(c)
- c.cfgNodeInfo.localInfo.SetOffline()
+ c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline)
- if c.cfgMorph.client == nil {
- initMorphComponents(ctx, c)
- }
+ c.initMorphComponents(ctx)
- initNetmapState(c)
+ initNetmapState(ctx, c)
server := netmapTransportGRPC.New(
netmapService.NewSignService(
@@ -166,53 +169,52 @@ func initNetmapService(ctx context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
netmapGRPC.RegisterNetmapServiceServer(s, server)
+
+ // TODO(@aarifullin): #1487 remove the dual service support.
+ s.RegisterService(frostFSServiceDesc(netmapGRPC.NetmapService_ServiceDesc), server)
})
addNewEpochNotificationHandlers(c)
}
func addNewEpochNotificationHandlers(c *cfg) {
- addNewEpochNotificationHandler(c, func(ev event.Event) {
+ addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber())
})
- addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
+ addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
e := ev.(netmapEvent.NewEpoch).EpochNumber()
- c.updateContractNodeInfo(e)
+ c.updateContractNodeInfo(ctx, e)
- if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
+ if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
return
}
- if err := c.bootstrap(); err != nil {
- c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
+ if err := c.bootstrap(ctx); err != nil {
+ c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
})
- if c.cfgMorph.notaryEnabled {
- addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
- _, err := makeNotaryDeposit(c)
- if err != nil {
- c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
- zap.String("error", err.Error()),
- )
- }
- })
- }
+ addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
+ _, _, err := makeNotaryDeposit(ctx, c)
+ if err != nil {
+ c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
+ zap.Error(err),
+ )
+ }
+ })
}
// bootstrapNode adds current node to the Network map.
// Must be called after initNetmapService.
-func bootstrapNode(c *cfg) {
- if c.needBootstrap() {
- if c.IsMaintenance() {
- c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
- return
- }
- err := c.bootstrap()
- fatalOnErrDetails("bootstrap error", err)
+func bootstrapNode(ctx context.Context, c *cfg) {
+ if c.IsMaintenance() {
+ c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
+ return
}
+ err := c.bootstrap(ctx)
+ fatalOnErrDetails("bootstrap error", err)
}
func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) {
@@ -237,46 +239,47 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
-func initNetmapState(c *cfg) {
- epoch, err := c.cfgNetmap.wrapper.Epoch()
+func initNetmapState(ctx context.Context, c *cfg) {
+ epoch, err := c.cfgNetmap.wrapper.Epoch(ctx)
fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo
- ni, err = c.netmapInitLocalNodeState(epoch)
+ ni, err = c.netmapInitLocalNodeState(ctx, epoch)
fatalOnErrDetails("could not init network state", err)
stateWord := nodeState(ni)
- c.log.Info(logs.FrostFSNodeInitialNetworkState,
+ c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)
- if ni != nil && ni.IsMaintenance() {
+ if ni != nil && ni.Status().IsMaintenance() {
c.isMaintenance.Store(true)
}
c.cfgNetmap.state.setCurrentEpoch(epoch)
- c.cfgNetmap.startEpoch = epoch
c.setContractNodeInfo(ni)
}
func nodeState(ni *netmapSDK.NodeInfo) string {
if ni != nil {
- switch {
- case ni.IsOnline():
+ switch ni.Status() {
+ case netmapSDK.Online:
return "online"
- case ni.IsOffline():
+ case netmapSDK.Offline:
return "offline"
- case ni.IsMaintenance():
+ case netmapSDK.Maintenance:
return "maintenance"
+ case netmapSDK.UnspecifiedState:
+ return "undefined"
}
}
return "undefined"
}
-func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
- nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
+func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
+ nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx)
if err != nil {
return nil, err
}
@@ -289,7 +292,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
}
}
- node, err := c.netmapLocalNodeState(epoch)
+ node, err := c.netmapLocalNodeState(ctx, epoch)
if err != nil {
return nil, err
}
@@ -303,16 +306,16 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
- c.log.Info(logs.CandidateStatusPriority,
+ c.log.Info(ctx, logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}
return candidate, nil
}
-func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
+func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
// calculate current network state
- nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
+ nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return nil, err
}
@@ -347,35 +350,29 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) {
)
}
-var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode")
-
-func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
+func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
switch st {
default:
return fmt.Errorf("unsupported status %v", st)
case control.NetmapStatus_MAINTENANCE:
- return c.setMaintenanceStatus(false)
+ return c.setMaintenanceStatus(ctx, false)
case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE:
}
- c.stopMaintenance()
-
- if !c.needBootstrap() {
- return errRelayBootstrap
- }
+ c.stopMaintenance(ctx)
if st == control.NetmapStatus_ONLINE {
c.cfgNetmap.reBoostrapTurnedOff.Store(false)
- return bootstrapOnline(c)
+ return bootstrapOnline(ctx, c)
}
c.cfgNetmap.reBoostrapTurnedOff.Store(true)
- return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {})
+ return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
}
-func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
- epoch, err := c.netMapSource.Epoch()
+func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) {
+ epoch, err := c.netMapSource.Epoch(ctx)
if err != nil {
return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
}
@@ -383,12 +380,12 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
return st, epoch, nil
}
-func (c *cfg) ForceMaintenance() error {
- return c.setMaintenanceStatus(true)
+func (c *cfg) ForceMaintenance(ctx context.Context) error {
+ return c.setMaintenanceStatus(ctx, true)
}
-func (c *cfg) setMaintenanceStatus(force bool) error {
- netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
+func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
+ netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx)
if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
} else if !netSettings.MaintenanceModeAllowed {
@@ -396,10 +393,10 @@ func (c *cfg) setMaintenanceStatus(force bool) error {
}
if err == nil || force {
- c.startMaintenance()
+ c.startMaintenance(ctx)
if err == nil {
- err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance)
+ err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance)
}
if err != nil {
@@ -412,13 +409,16 @@ func (c *cfg) setMaintenanceStatus(force bool) error {
// calls UpdatePeerState operation of Netmap contract's client for the local node.
// State setter is used to specify node state to switch to.
-func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error {
+func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error {
var prm nmClient.UpdatePeerPrm
prm.SetKey(c.key.PublicKey().Bytes())
stateSetter(&prm)
- _, err := c.cfgNetmap.wrapper.UpdatePeerState(prm)
- return err
+ res, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm)
+ if err != nil {
+ return err
+ }
+ return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
}
type netInfo struct {
@@ -433,7 +433,7 @@ type netInfo struct {
msPerBlockRdr func() (int64, error)
}
-func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
+func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) {
magic, err := n.magic.MagicNumber()
if err != nil {
return nil, err
@@ -443,7 +443,7 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
ni.SetCurrentEpoch(n.netState.CurrentEpoch())
ni.SetMagicNumber(magic)
- netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
+ netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx)
if err != nil {
return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
}
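
updateNetMapState is no longer fire-and-forget: the result's hash and ValidUntilBlock are passed to WaitTxHalt, so SetNetmapStatus returns only after the chain has accepted the state change. A sketch of the shape with stand-in types:

    package sketch

    import "context"

    // txResult stands in for the client result that now carries the hash
    // and ValidUntilBlock of the submitted transaction.
    type txResult struct {
        Hash [32]byte
        VUB  uint32
    }

    type morph interface {
        UpdatePeerState(ctx context.Context, key []byte) (txResult, error)
        WaitTxHalt(ctx context.Context, vub uint32, tx [32]byte) error
    }

    // updateState submits the state change and blocks until the chain has
    // persisted it with HALT, so callers observe only accepted transitions.
    func updateState(ctx context.Context, m morph, key []byte) error {
        res, err := m.UpdatePeerState(ctx, key)
        if err != nil {
            return err
        }
        return m.WaitTxHalt(ctx, res.VUB, res.Hash)
    }
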
diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go
new file mode 100644
index 000000000..e6be9cdf5
--- /dev/null
+++ b/cmd/frostfs-node/netmap_source.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+type rawNetmapSource struct {
+ client *netmapClient.Client
+}
+
+func newRawNetmapStorage(client *netmapClient.Client) netmap.Source {
+ return &rawNetmapSource{
+ client: client,
+ }
+}
+
+func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
+ nm, err := s.client.GetNetMap(ctx, diff)
+ if err != nil {
+ return nil, err
+ }
+ candidates, err := s.client.GetCandidates(ctx)
+ if err != nil {
+ return nil, err
+ }
+ updates := getNetMapNodesToUpdate(nm, candidates)
+ if len(updates) > 0 {
+ mergeNetmapWithCandidates(updates, nm)
+ }
+ return nm, nil
+}
+
+func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
+ nm, err := s.client.GetNetMapByEpoch(ctx, epoch)
+ if err != nil {
+ return nil, err
+ }
+ candidates, err := s.client.GetCandidates(ctx)
+ if err != nil {
+ return nil, err
+ }
+ updates := getNetMapNodesToUpdate(nm, candidates)
+ if len(updates) > 0 {
+ mergeNetmapWithCandidates(updates, nm)
+ }
+ return nm, nil
+}
+
+func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) {
+ return s.client.Epoch(ctx)
+}
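
Both Get methods of the new raw source repeat the same step: fetch the current candidate list and fold fresher node data into the epoch snapshot. A sketch that factors the shared step out (merge here is a deliberately simplified stand-in for mergeNetmapWithCandidates):

    package sketch

    import "context"

    type NetMap struct{ Nodes []string }

    type rpc interface {
        GetNetMapByEpoch(ctx context.Context, epoch uint64) (*NetMap, error)
        GetCandidates(ctx context.Context) ([]string, error)
    }

    // withCandidates factors out the step both Get* methods repeat: fetch
    // the candidate list and fold fresher node data into the snapshot.
    func withCandidates(ctx context.Context, c rpc, nm *NetMap) (*NetMap, error) {
        candidates, err := c.GetCandidates(ctx)
        if err != nil {
            return nil, err
        }
        merge(nm, candidates)
        return nm, nil
    }

    // merge is simplified: it appends candidates missing from the
    // snapshot, whereas the real mergeNetmapWithCandidates updates node
    // data in place for nodes present in both lists.
    func merge(nm *NetMap, candidates []string) {
        seen := make(map[string]bool, len(nm.Nodes))
        for _, n := range nm.Nodes {
            seen[n] = true
        }
        for _, c := range candidates {
            if !seen[c] {
                nm.Nodes = append(nm.Nodes, c)
            }
        }
    }
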
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 610e2c363..c33c02b3f 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -2,12 +2,9 @@ package main
import (
"context"
- "errors"
"fmt"
"net"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
@@ -16,13 +13,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl"
- v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@@ -38,8 +31,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -60,22 +54,22 @@ type objectSvc struct {
patch *patchsvc.Service
}
-func (c *cfg) MaxObjectSize() uint64 {
- sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
+func (c *cfg) MaxObjectSize(ctx context.Context) uint64 {
+ sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx)
if err != nil {
- c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
- zap.String("error", err.Error()),
+ c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
+ zap.Error(err),
)
}
return sz
}
-func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
+func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) {
return s.put.Put()
}
-func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) {
+func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) {
return s.patch.Patch()
}
@@ -109,16 +103,15 @@ func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRe
type delNetInfo struct {
netmap.State
- tsLifetime uint64
cfg *cfg
}
func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
- return i.tsLifetime, nil
+ return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
}
-// returns node owner ID calculated from configured private key.
+// LocalNodeID returns node owner ID calculated from configured private key.
//
// Implements method needed for Object.Delete service.
func (i *delNetInfo) LocalNodeID() user.ID {
@@ -129,8 +122,8 @@ type innerRingFetcherWithNotary struct {
sidechain *morphClient.Client
}
-func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
- keys, err := fn.sidechain.NeoFSAlphabetList()
+func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) {
+ keys, err := fn.sidechain.NeoFSAlphabetList(ctx)
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
}
@@ -143,24 +136,6 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
return result, nil
}
-type innerRingFetcherWithoutNotary struct {
- nm *nmClient.Client
-}
-
-func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
- keys, err := f.nm.GetInnerRingList()
- if err != nil {
- return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
- }
-
- result := make([][]byte, 0, len(keys))
- for i := range keys {
- result = append(result, keys[i].Bytes())
- }
-
- return result, nil
-}
-
func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
@@ -175,11 +150,12 @@ func initObjectService(c *cfg) {
sPutV2 := createPutSvcV2(sPut, keyStorage)
- sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache)
+ sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
- sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
+ sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource,
+ c.ObjectCfg.priorityMetrics)
*c.cfgObject.getSvc = *sGet // need smth better
@@ -192,16 +168,14 @@ func initObjectService(c *cfg) {
sPatch := createPatchSvc(sGet, sPut)
// build service pipeline
- // grpc | audit | <metrics> | signature | response | acl | ape | split
+ // grpc | audit | qos | <metrics> | signature | response | acl | ape | split
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
- apeSvc := createAPEService(c, splitSvc)
-
- aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
+ apeSvc := createAPEService(c, &irFetcher, splitSvc)
var commonSvc objectService.Common
- commonSvc.Init(&c.internals, aclSvc)
+ commonSvc.Init(&c.internals, apeSvc)
respSvc := objectService.NewResponseService(
&commonSvc,
@@ -213,19 +187,23 @@ func initObjectService(c *cfg) {
respSvc,
)
- c.shared.metricsSvc = objectService.NewMetricCollector(
+ c.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
- auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit)
+ qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
+ auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
server := objectTransportGRPC.New(auditSvc)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
objectGRPC.RegisterObjectServiceServer(s, server)
+
+ // TODO(@aarifullin): #1487 remove the dual service support.
+ s.RegisterService(frostFSServiceDesc(objectGRPC.ObjectService_ServiceDesc), server)
})
}
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
if policerconfig.UnsafeDisable(c.appCfg) {
- c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
+ c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
return
}
@@ -236,14 +214,12 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr)
prm.WithForceRemoval()
- _, err := ls.Inhume(ctx, prm)
- return err
+ return ls.Inhume(ctx, prm)
}
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
-
pol := policer.New(
- policer.WithLogger(c.log),
+ policer.WithLogger(c.log.WithTag(logger.TagPolicer)),
policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}),
policer.WithBuryFunc(buryFn),
policer.WithContainerSource(c.cfgObject.cnrSource),
@@ -287,10 +263,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr)
- _, err := ls.Inhume(ctx, inhumePrm)
- if err != nil {
- c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
- zap.String("error", err.Error()),
+ if err := ls.Inhume(ctx, inhumePrm); err != nil {
+ c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
+ zap.Error(err),
)
}
}),
@@ -306,14 +281,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
})
}
-func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
- if c.cfgMorph.client.ProbeNotary() {
- return &innerRingFetcherWithNotary{
- sidechain: c.cfgMorph.client,
- }
- }
- return &innerRingFetcherWithoutNotary{
- nm: c.cfgNetmap.wrapper,
+func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
+ return &innerRingFetcherWithNotary{
+ sidechain: c.cfgMorph.client,
}
}
@@ -321,7 +291,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
ls := c.cfgObject.cfgLocalStorage.localStorage
return replicator.New(
- replicator.WithLogger(c.log),
+ replicator.WithLogger(c.log.WithTag(logger.TagReplicator)),
replicator.WithPutTimeout(
replicatorconfig.PutTimeout(c.appCfg),
),
@@ -353,7 +323,6 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c,
c.cfgNetmap.state,
irFetcher,
- objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
objectwriter.WithLogger(c.log),
objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
@@ -367,7 +336,7 @@ func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Servic
return patchsvc.NewService(sPut.Config, sGet)
}
-func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
+func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return searchsvc.New(
@@ -378,7 +347,8 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav
),
c.netMapSource,
keyStorage,
- searchsvc.WithLogger(c.log),
+ containerSource,
+ searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)),
)
}
@@ -389,6 +359,7 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage)
func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator,
coreConstructor *cache.ClientCache,
containerSource containercore.Source,
+ priorityMetrics []placement.Metric,
) *getsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
@@ -398,10 +369,12 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra
ls,
traverseGen.WithTraverseOptions(
placement.SuccessAfter(1),
+ placement.WithPriorityMetrics(priorityMetrics),
+ placement.WithNodeState(c),
),
coreConstructor,
containerSource,
- getsvc.WithLogger(c.log))
+ getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc)))
}
func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service {
@@ -412,7 +385,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag
c.netMapSource,
c,
c.cfgObject.cnrSource,
- getsvcV2.WithLogger(c.log),
+ getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)),
)
}
@@ -424,13 +397,12 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi
sSearch,
sPut,
&delNetInfo{
- State: c.cfgNetmap.state,
- tsLifetime: c.cfgObject.tombstoneLifetime,
+ State: c.cfgNetmap.state,
cfg: c,
},
keyStorage,
- deletesvc.WithLogger(c.log),
+ deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)),
)
}
@@ -454,63 +426,23 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
)
}
-func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
- ls := c.cfgObject.cfgLocalStorage.localStorage
-
- return v2.New(
- apeSvc,
- c.netMapSource,
- irFetcher,
- acl.NewChecker(
- c.cfgNetmap.state,
- c.cfgObject.eaclSource,
- eaclSDK.NewValidator(),
- ls),
- c.cfgObject.cnrSource,
- v2.WithLogger(c.log),
- )
-}
-
-func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
+func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
- c.log,
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
- c.shared.frostfsidClient,
+ c.frostfsidClient,
c.netMapSource,
c.cfgNetmap.state,
c.cfgObject.cnrSource,
c.binPublicKey,
),
+ objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
splitSvc,
)
}
-type morphEACLFetcher struct {
- w *cntClient.Client
-}
-
-func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) {
- eaclInfo, err := s.w.GetEACL(cnr)
- if err != nil {
- return nil, err
- }
-
- binTable, err := eaclInfo.Value.Marshal()
- if err != nil {
- return nil, fmt.Errorf("marshal eACL table: %w", err)
- }
-
- if !eaclInfo.Signature.Verify(binTable) {
- // TODO(@cthulhu-rider): #468 use "const" error
- return nil, errors.New("invalid signature of the eACL table")
- }
-
- return eaclInfo, nil
-}
-
type engineWithoutNotifications struct {
engine *engine.StorageEngine
}
@@ -530,14 +462,13 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...)
- _, err := e.engine.Inhume(ctx, prm)
- return err
+ return e.engine.Inhume(ctx, prm)
}
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
- return engine.Put(ctx, e.engine, o)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error {
+ return engine.Put(ctx, e.engine, o, indexedContainer)
}
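
With the dedicated ACL service deleted, the object pipeline is assembled as a chain of decorators, outermost gRPC first. A toy sketch of that assembly (the real layers wrap the generated object service interface, not this Handler):

    package sketch

    import "context"

    // Handler is a toy stand-in for the generated object service surface.
    type Handler interface {
        Get(ctx context.Context, req string) (string, error)
    }

    // splitCore plays the innermost split service.
    type splitCore struct{}

    func (splitCore) Get(_ context.Context, req string) (string, error) {
        return "obj:" + req, nil
    }

    // layer decorates the next handler; a real layer would sign, audit,
    // collect metrics, or check APE rules before delegating.
    type layer struct {
        name string
        next Handler
    }

    func (l layer) Get(ctx context.Context, req string) (string, error) {
        return l.next.Get(ctx, req)
    }

    // build assembles the chain innermost-first, mirroring how
    // initObjectService wraps split with ape, response, signature,
    // metrics, qos and audit before handing it to the gRPC server.
    func build() Handler {
        var h Handler = splitCore{}
        for _, name := range []string{"ape", "response", "signature", "metrics", "qos", "audit"} {
            h = layer{name: name, next: h}
        }
        return h
    }
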
diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go
index 22fda2b4c..55f76cc76 100644
--- a/cmd/frostfs-node/policy_engine.go
+++ b/cmd/frostfs-node/policy_engine.go
@@ -21,7 +21,9 @@ type accessPolicyEngine struct {
var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil)
type morphAPEChainCacheKey struct {
- name chain.Name
+ // nolint:unused
+ name chain.Name
+ // nolint:unused
target engine.Target
}
diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go
index dcd320146..e4da8119f 100644
--- a/cmd/frostfs-node/pprof.go
+++ b/cmd/frostfs-node/pprof.go
@@ -1,49 +1,50 @@
package main
import (
+ "context"
"runtime"
profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)
-func initProfilerService(c *cfg) {
+func initProfilerService(ctx context.Context, c *cfg) {
tuneProfilers(c)
pprof, _ := pprofComponent(c)
- pprof.init(c)
+ pprof.init(ctx, c)
}
func pprofComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
- if c.dynamicConfiguration.pprof == nil {
- c.dynamicConfiguration.pprof = new(httpComponent)
- c.dynamicConfiguration.pprof.cfg = c
- c.dynamicConfiguration.pprof.name = "pprof"
- c.dynamicConfiguration.pprof.handler = httputil.Handler()
- c.dynamicConfiguration.pprof.preReload = tuneProfilers
+ if c.pprof == nil {
+ c.pprof = new(httpComponent)
+ c.pprof.cfg = c
+ c.pprof.name = "pprof"
+ c.pprof.handler = httputil.Handler()
+ c.pprof.preReload = tuneProfilers
updated = true
}
// (re)init read configuration
enabled := profilerconfig.Enabled(c.appCfg)
- if enabled != c.dynamicConfiguration.pprof.enabled {
- c.dynamicConfiguration.pprof.enabled = enabled
+ if enabled != c.pprof.enabled {
+ c.pprof.enabled = enabled
updated = true
}
address := profilerconfig.Address(c.appCfg)
- if address != c.dynamicConfiguration.pprof.address {
- c.dynamicConfiguration.pprof.address = address
+ if address != c.pprof.address {
+ c.pprof.address = address
updated = true
}
dur := profilerconfig.ShutdownTimeout(c.appCfg)
- if dur != c.dynamicConfiguration.pprof.shutdownDur {
- c.dynamicConfiguration.pprof.shutdownDur = dur
+ if dur != c.pprof.shutdownDur {
+ c.pprof.shutdownDur = dur
updated = true
}
- return c.dynamicConfiguration.pprof, updated
+ return c.pprof, updated
}
func tuneProfilers(c *cfg) {
diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go
new file mode 100644
index 000000000..6394b668b
--- /dev/null
+++ b/cmd/frostfs-node/qos.go
@@ -0,0 +1,108 @@
+package main
+
+import (
+ "bytes"
+ "context"
+
+ qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "go.uber.org/zap"
+)
+
+type cfgQoSService struct {
+ netmapSource netmap.Source
+ logger *logger.Logger
+ allowedCriticalPubs [][]byte
+ allowedInternalPubs [][]byte
+}
+
+func initQoSService(c *cfg) {
+ criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
+ internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
+ rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
+ rawInternalPubs := make([][]byte, 0, len(internalPubs))
+ for i := range criticalPubs {
+ rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
+ }
+ for i := range internalPubs {
+ rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
+ }
+
+ c.cfgQoSService = cfgQoSService{
+ netmapSource: c.netMapSource,
+ logger: c.log,
+ allowedCriticalPubs: rawCriticalPubs,
+ allowedInternalPubs: rawInternalPubs,
+ }
+}
+
+func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
+ rawTag, defined := qosTagging.IOTagFromContext(ctx)
+ if !defined {
+ if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
+ }
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
+ }
+ ioTag, err := qos.FromRawString(rawTag)
+ if err != nil {
+ s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
+ }
+
+ switch ioTag {
+ case qos.IOTagClient:
+ return ctx
+ case qos.IOTagCritical:
+ for _, pk := range s.allowedCriticalPubs {
+ if bytes.Equal(pk, requestSignPublicKey) {
+ return ctx
+ }
+ }
+ nm, err := s.netmapSource.GetNetMap(ctx, 0)
+ if err != nil {
+ s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
+ }
+ for _, node := range nm.Nodes() {
+ if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
+ return ctx
+ }
+ }
+ s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
+ case qos.IOTagInternal:
+ if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
+ return ctx
+ }
+ s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
+ default:
+ s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
+ }
+}
+
+func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
+ for _, pk := range s.allowedInternalPubs {
+ if bytes.Equal(pk, publicKey) {
+ return true
+ }
+ }
+ nm, err := s.netmapSource.GetNetMap(ctx, 0)
+ if err != nil {
+ s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
+ return false
+ }
+ for _, node := range nm.Nodes() {
+ if bytes.Equal(node.PublicKey(), publicKey) {
+ return true
+ }
+ }
+
+ return false
+}
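
AdjustIncomingTag normalizes the per-request IO tag: unknown or unauthorized tags degrade to the client tag, while internal and critical tags survive only for requests signed by an allowed key or a current netmap node. A sketch of wiring it into a standard grpc-go unary interceptor; signerKey is a hypothetical helper, since the real node recovers the public key from the signed request body:

    package sketch

    import (
        "context"

        "google.golang.org/grpc"
    )

    // tagAdjuster matches the method the diff adds on cfgQoSService.
    type tagAdjuster interface {
        AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
    }

    // unaryQoS rewrites the request context before the handler runs.
    // signerKey is a hypothetical helper standing in for signature
    // extraction from the request.
    func unaryQoS(s tagAdjuster, signerKey func(req any) []byte) grpc.UnaryServerInterceptor {
        return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, h grpc.UnaryHandler) (any, error) {
            return h(s.AdjustIncomingTag(ctx, signerKey(req)), req)
        }
    }
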
diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go
new file mode 100644
index 000000000..971f9eebf
--- /dev/null
+++ b/cmd/frostfs-node/qos_test.go
@@ -0,0 +1,226 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+func TestQoSService_Client(t *testing.T) {
+ t.Parallel()
+ s, pk := testQoSServicePrepare(t)
+ t.Run("IO tag client defined", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.Request)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
+ ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
+ ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
+ ctx = s.AdjustIncomingTag(ctx, pk.Request)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
+ ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
+ ctx = s.AdjustIncomingTag(ctx, pk.Internal)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
+ ctx = s.AdjustIncomingTag(ctx, pk.Critical)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.Request)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.Critical)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.Request)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.Internal)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+}
+
+func TestQoSService_Internal(t *testing.T) {
+ t.Parallel()
+ s, pk := testQoSServicePrepare(t)
+ t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagInternal.String(), tag)
+ })
+ t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.Internal)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagInternal.String(), tag)
+ })
+ t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
+ ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagInternal.String(), tag)
+ })
+ t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
+ ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagInternal.String(), tag)
+ })
+}
+
+func TestQoSService_Critical(t *testing.T) {
+ t.Parallel()
+ s, pk := testQoSServicePrepare(t)
+ t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagCritical.String(), tag)
+ })
+ t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.Critical)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagCritical.String(), tag)
+ })
+}
+
+func TestQoSService_NetmapGetError(t *testing.T) {
+ t.Parallel()
+ s, pk := testQoSServicePrepare(t)
+ s.netmapSource = &utilTesting.TestNetmapSource{}
+ t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+ ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
+ ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+ t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
+ ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
+ ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
+ tag, ok := tagging.IOTagFromContext(ctx)
+ require.True(t, ok)
+ require.Equal(t, qos.IOTagClient.String(), tag)
+ })
+}
+
+func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
+ nmSigner, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ reqSigner, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ allowedCritSigner, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ allowedIntSigner, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ var node netmap.NodeInfo
+ node.SetPublicKey(nmSigner.PublicKey().Bytes())
+ nm := &netmap.NetMap{}
+ nm.SetEpoch(100)
+ nm.SetNodes([]netmap.NodeInfo{node})
+
+ return &cfgQoSService{
+ logger: test.NewLogger(t),
+ netmapSource: &utilTesting.TestNetmapSource{
+ Netmaps: map[uint64]*netmap.NetMap{
+ 100: nm,
+ },
+ CurrentEpoch: 100,
+ },
+ allowedCriticalPubs: [][]byte{
+ allowedCritSigner.PublicKey().Bytes(),
+ },
+ allowedInternalPubs: [][]byte{
+ allowedIntSigner.PublicKey().Bytes(),
+ },
+ },
+ &testQoSServicePublicKeys{
+ NetmapNode: nmSigner.PublicKey().Bytes(),
+ Request: reqSigner.PublicKey().Bytes(),
+ Internal: allowedIntSigner.PublicKey().Bytes(),
+ Critical: allowedCritSigner.PublicKey().Bytes(),
+ }
+}
+
+type testQoSServicePublicKeys struct {
+ NetmapNode []byte
+ Request []byte
+ Internal []byte
+ Critical []byte
+}
diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go
index d858ba490..f6d398574 100644
--- a/cmd/frostfs-node/runtime.go
+++ b/cmd/frostfs-node/runtime.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"os"
"runtime/debug"
@@ -9,17 +10,17 @@ import (
"go.uber.org/zap"
)
-func setRuntimeParameters(c *cfg) {
+func setRuntimeParameters(ctx context.Context, c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
- c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
+ c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
- c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
+ c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}
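
setRuntimeParameters keeps the precedence noted in the comment (default < yaml < app env < GOMEMLIMIT): when GOMEMLIMIT is set the Go runtime has already applied it, so the configured value is skipped. A minimal sketch:

    package sketch

    import (
        "os"
        "runtime/debug"
    )

    // applyMemLimit mirrors the rule above: an explicit GOMEMLIMIT wins
    // over any configured value because the runtime already applied it;
    // otherwise set the configured soft limit and report a change.
    func applyMemLimit(configured int64) (changed bool) {
        if os.Getenv("GOMEMLIMIT") != "" {
            return false
        }
        previous := debug.SetMemoryLimit(configured)
        return configured != previous
    }
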
diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go
index ee21ec230..fbfe3f5e6 100644
--- a/cmd/frostfs-node/session.go
+++ b/cmd/frostfs-node/session.go
@@ -6,8 +6,6 @@ import (
"net"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -16,6 +14,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"google.golang.org/grpc"
)
@@ -48,18 +49,21 @@ func initSessionService(c *cfg) {
_ = c.privateTokenStore.Close()
})
- addNewEpochNotificationHandler(c, func(ev event.Event) {
+ addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber())
})
server := sessionTransportGRPC.New(
sessionSvc.NewSignService(
&c.key.PrivateKey,
- sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log),
+ sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)),
),
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
sessionGRPC.RegisterSessionServiceServer(s, server)
+
+ // TODO(@aarifullin): #1487 remove the dual service support.
+ s.RegisterService(frostFSServiceDesc(sessionGRPC.SessionService_ServiceDesc), server)
})
}
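
The second registration above exposes the same implementation under an alternative service descriptor for the migration period. The `frostFSServiceDesc` helper is defined elsewhere in the tree; a hypothetical sketch of what such a clone-and-rename helper could look like (the exact name rewrite is an assumption, only the `grpc.ServiceDesc` handling is standard gRPC):

```go
import (
	"strings"

	"google.golang.org/grpc"
)

// frostFSServiceDesc (hypothetical reconstruction) copies a gRPC service
// descriptor so the same server value can be registered a second time
// under a renamed service, keeping old and new clients working at once.
func frostFSServiceDesc(sd grpc.ServiceDesc) *grpc.ServiceDesc {
	cp := sd // shallow copy; method and stream tables are shared intentionally
	cp.ServiceName = strings.Replace(sd.ServiceName, "neo.fs.v2", "frostfs.v2", 1) // assumed rename
	return &cp
}
```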
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index f550dd882..65f5aec15 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -13,12 +13,12 @@ import (
func initTracing(ctx context.Context, c *cfg) {
conf, err := tracingconfig.ToTracingConfig(c.appCfg)
if err != nil {
- c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
_, err = tracing.Setup(ctx, *conf)
if err != nil {
- c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
@@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
defer cancel()
err := tracing.Shutdown(ctx) // cfg context cancels before close
if err != nil {
- c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index d22e510de..62af45389 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -14,6 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
"google.golang.org/grpc"
@@ -29,49 +30,50 @@ type cnrSource struct {
cli *containerClient.Client
}
-func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
- return c.src.Get(id)
+func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return c.src.Get(ctx, id)
}
-func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
- return c.src.DeletionInfo(cid)
+func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) {
+ return c.src.DeletionInfo(ctx, cid)
}
-func (c cnrSource) List() ([]cid.ID, error) {
- return c.cli.ContainersOf(nil)
+func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) {
+ return c.cli.ContainersOf(ctx, nil)
}
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
- c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
+ c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}
c.treeService = tree.New(
tree.WithContainerSource(cnrSource{
src: c.cfgObject.cnrSource,
- cli: c.shared.cnrClient,
+ cli: c.cnrClient,
}),
- tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
- tree.WithEACLSource(c.cfgObject.eaclSource),
+ tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
tree.WithNetmapSource(c.netMapSource),
tree.WithPrivateKey(&c.key.PrivateKey),
- tree.WithLogger(c.log),
+ tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)),
tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage),
tree.WithContainerCacheSize(treeConfig.CacheSize()),
tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()),
tree.WithReplicationChannelCapacity(treeConfig.ReplicationChannelCapacity()),
tree.WithReplicationWorkerCount(treeConfig.ReplicationWorkerCount()),
+ tree.WithSyncBatchSize(treeConfig.SyncBatchSize()),
tree.WithAuthorizedKeys(treeConfig.AuthorizedKeys()),
tree.WithMetrics(c.metricsCollector.TreeService()),
tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()),
tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()),
tree.WithNetmapState(c.cfgNetmap.state),
+ tree.WithDialerSource(c.dialerSource),
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- tree.RegisterTreeServiceServer(s, c.treeService)
+ tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService))
})
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
@@ -79,10 +81,10 @@ func initTreeService(c *cfg) {
}))
if d := treeConfig.SyncInterval(); d == 0 {
- addNewEpochNotificationHandler(c, func(_ event.Event) {
+ addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+ c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {
@@ -93,7 +95,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+ c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}
@@ -102,17 +104,17 @@ func initTreeService(c *cfg) {
}()
}
- subscribeToContainerRemoval(c, func(e event.Event) {
+ subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
ev := e.(containerEvent.DeleteSuccess)
// This is executed asynchronously, so we don't care about the operation taking some time.
- c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
- err := c.treeService.DropTree(context.Background(), ev.ID, "")
+ c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
+ err := c.treeService.DropTree(ctx, ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
- c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
+ c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
})
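
With these changes every container lookup in the tree service is cancellable. Restated, the contract cnrSource now satisfies looks like the interface below (signatures are copied from the diff; the interface name and import paths are assumptions for illustration):

```go
import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" // assumed path
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// ContainerSource is an illustrative name for what cnrSource implements:
// every method threads the caller's context down to the morph client.
type ContainerSource interface {
	Get(ctx context.Context, id cid.ID) (*container.Container, error)
	DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error)
	List(ctx context.Context) ([]cid.ID, error)
}
```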
diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go
index ae52b9e4a..22d2e0aa9 100644
--- a/cmd/frostfs-node/validate.go
+++ b/cmd/frostfs-node/validate.go
@@ -30,6 +30,11 @@ func validateConfig(c *config.Config) error {
return fmt.Errorf("invalid logger destination: %w", err)
}
+ err = loggerPrm.SetTags(loggerconfig.Tags(c))
+ if err != nil {
+ return fmt.Errorf("invalid list of allowed tags: %w", err)
+ }
+
// shard configuration validation
shardNum := 0
diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go
index d9c0f167f..495365cf0 100644
--- a/cmd/frostfs-node/validate_test.go
+++ b/cmd/frostfs-node/validate_test.go
@@ -1,7 +1,6 @@
package main
import (
- "os"
"path/filepath"
"testing"
@@ -22,17 +21,4 @@ func TestValidate(t *testing.T) {
require.NoError(t, err)
})
})
-
- t.Run("mainnet", func(t *testing.T) {
- os.Clearenv() // ENVs have priority over config files, so we do this in tests
- p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
- c := config.New(p, "", config.EnvPrefix)
- require.NoError(t, validateConfig(c))
- })
- t.Run("testnet", func(t *testing.T) {
- os.Clearenv() // ENVs have priority over config files, so we do this in tests
- p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
- c := config.New(p, "", config.EnvPrefix)
- require.NoError(t, validateConfig(c))
- })
}
diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go
new file mode 100644
index 000000000..e5a35ab71
--- /dev/null
+++ b/cmd/internal/common/ape/commands.go
@@ -0,0 +1,167 @@
+package ape
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "github.com/nspcc-dev/neo-go/cli/input"
+ "github.com/spf13/cobra"
+)
+
+const (
+ defaultNamespace = "root"
+ namespaceTarget = "namespace"
+ containerTarget = "container"
+ userTarget = "user"
+ groupTarget = "group"
+
+ Ingress = "ingress"
+ S3 = "s3"
+)
+
+var mChainName = map[string]apechain.Name{
+ Ingress: apechain.Ingress,
+ S3: apechain.S3,
+}
+
+var (
+ errSettingDefaultValueWasDeclined = errors.New("setting default value was declined")
+ errUnknownTargetType = errors.New("unknown target type")
+ errUnsupportedChainName = errors.New("unsupported chain name")
+)
+
+// PrintHumanReadableAPEChain prints APE chain rules.
+func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) {
+ cmd.Println("Chain ID: " + string(chain.ID))
+ cmd.Printf(" HEX: %x\n", chain.ID)
+ cmd.Println("Rules:")
+ for _, rule := range chain.Rules {
+ cmd.Println("\n\tStatus: " + rule.Status.String())
+ cmd.Println("\tAny: " + strconv.FormatBool(rule.Any))
+ cmd.Println("\tConditions:")
+ for _, c := range rule.Condition {
+ var ot string
+ switch c.Kind {
+ case apechain.KindResource:
+ ot = "Resource"
+ case apechain.KindRequest:
+ ot = "Request"
+ default:
+ panic("unknown object type")
+ }
+ cmd.Printf("\t\t%s %s %s %s\n", ot, c.Key, c.Op, c.Value)
+ }
+ cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted))
+ for _, name := range rule.Actions.Names {
+ cmd.Println("\t\t" + name)
+ }
+ cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted))
+ for _, name := range rule.Resources.Names {
+ cmd.Println("\t\t" + name)
+ }
+ }
+}
+
+// ParseTarget handles target parsing of an APE chain.
+func ParseTarget(cmd *cobra.Command) engine.Target {
+ typ := ParseTargetType(cmd)
+ name, _ := cmd.Flags().GetString(TargetNameFlag)
+ switch typ {
+ case engine.Namespace:
+ if name == "" {
+ ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace))
+ commonCmd.ExitOnErr(cmd, "read line error: %w", err)
+ ln = strings.ToLower(ln)
+ if len(ln) > 0 && (ln[0] == 'n') {
+ commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined)
+ }
+ name = defaultNamespace
+ }
+ return engine.NamespaceTarget(name)
+ case engine.Container:
+ var cnr cid.ID
+ commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
+ return engine.ContainerTarget(name)
+ case engine.User:
+ return engine.UserTarget(name)
+ case engine.Group:
+ return engine.GroupTarget(name)
+ default:
+ commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
+ }
+ panic("unreachable")
+}
+
+// ParseTargetType handles target type parsing of an APE chain.
+func ParseTargetType(cmd *cobra.Command) engine.TargetType {
+ typ, _ := cmd.Flags().GetString(TargetTypeFlag)
+ switch typ {
+ case namespaceTarget:
+ return engine.Namespace
+ case containerTarget:
+ return engine.Container
+ case userTarget:
+ return engine.User
+ case groupTarget:
+ return engine.Group
+ default:
+ commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType)
+ }
+ panic("unreachable")
+}
+
+// ParseChainID handles the parsing of an APE-chain identifier.
+// For some subcommands, chain ID is optional as an input parameter and should be generated by
+// the service instead.
+func ParseChainID(cmd *cobra.Command) (id apechain.ID) {
+ chainID, _ := cmd.Flags().GetString(ChainIDFlag)
+ id = apechain.ID(chainID)
+
+ hexEncoded, _ := cmd.Flags().GetBool(ChainIDHexFlag)
+ if !hexEncoded {
+ return
+ }
+
+ chainIDRaw, err := hex.DecodeString(chainID)
+ commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
+ id = apechain.ID(chainIDRaw)
+ return
+}
+
+// ParseChain parses an APE chain which can be provided either as a rule statement
+// or loaded from a binary/JSON file path.
+func ParseChain(cmd *cobra.Command) *apechain.Chain {
+ chain := new(apechain.Chain)
+ chain.ID = ParseChainID(cmd)
+
+ if rules, _ := cmd.Flags().GetStringArray(RuleFlag); len(rules) > 0 {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", apeutil.ParseAPEChain(chain, rules))
+ } else if encPath, _ := cmd.Flags().GetString(PathFlag); encPath != "" {
+ commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", apeutil.ParseAPEChainBinaryOrJSON(chain, encPath))
+ } else {
+ commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed"))
+ }
+
+ cmd.Println("Parsed chain:")
+ PrintHumanReadableAPEChain(cmd, chain)
+
+ return chain
+}
+
+// ParseChainName parses chain name: the place in the request lifecycle where policy is applied.
+func ParseChainName(cmd *cobra.Command) apechain.Name {
+ chainName, _ := cmd.Flags().GetString(ChainNameFlag)
+ apeChainName, ok := mChainName[strings.ToLower(chainName)]
+ if !ok {
+ commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
+ }
+ return apeChainName
+}
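
Note that --chain-id-hex changes only how the flag value is decoded, not the resulting ID. Both spellings below produce the same apechain.ID (standard-library sketch):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	literal := "chain-id"
	encoded := hex.EncodeToString([]byte(literal)) // "636861696e2d6964"

	decoded, err := hex.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	// --chain-id-hex simply hex-decodes the value before constructing the ID.
	fmt.Println(literal == string(decoded)) // true
}
```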
diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go
new file mode 100644
index 000000000..d8b2e88a2
--- /dev/null
+++ b/cmd/internal/common/ape/flags.go
@@ -0,0 +1,79 @@
+package ape
+
+const (
+ RuleFlag = "rule"
+ PathFlag = "path"
+ PathFlagDesc = "Path to encoded chain in JSON or binary format"
+ TargetNameFlag = "target-name"
+ TargetNameFlagDesc = "Resource name in APE resource name format"
+ TargetTypeFlag = "target-type"
+ TargetTypeFlagDesc = "Resource type (namespace/container/user/group)"
+ ChainIDFlag = "chain-id"
+ ChainIDFlagDesc = "Chain ID"
+ ChainIDHexFlag = "chain-id-hex"
+ ChainIDHexFlagDesc = "Flag to parse chain ID as hex"
+ ChainNameFlag = "chain-name"
+ ChainNameFlagDesc = "Chain name (ingress|s3)"
+ AllFlag = "all"
+)
+
+const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format:
+ <status>[:status_detail] <action>... <condition>... <resource>...
+
+Status:
+ - allow Permits specified actions
+ - deny Prohibits specified actions
+ - deny:QuotaLimitReached Denies access due to quota limits
+
+Actions:
+ Object operations:
+ - Object.Put, Object.Get, etc.
+ - Object.* (all object operations)
+ Container operations:
+ - Container.Put, Container.Get, etc.
+ - Container.* (all container operations)
+
+Conditions:
+ ResourceCondition:
+ Format: ResourceCondition:"key"=value, "key"!=value
+ Reserved properties (use '\' before '$'):
+ - $Object:version
+ - $Object:objectID
+ - $Object:containerID
+ - $Object:ownerID
+ - $Object:creationEpoch
+ - $Object:payloadLength
+ - $Object:payloadHash
+ - $Object:objectType
+ - $Object:homomorphicHash
+
+RequestCondition:
+ Format: RequestCondition:"key"=value, "key"!=value
+ Reserved properties (use '\' before '$'):
+ - $Actor:publicKey
+ - $Actor:role
+
+ Example:
+ ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others
+
+Resources:
+ For objects:
+ - namespace/cid/oid (specific object)
+ - namespace/cid/* (all objects in container)
+ - namespace/* (all objects in namespace)
+ - * (all objects)
+ - /* (all objects in root namespace)
+ - /cid/* (all objects in root container)
+ - /cid/oid (specific object in root container)
+
+ For containers:
+ - namespace/cid (specific container)
+ - namespace/* (all containers in namespace)
+ - * (all containers)
+ - /cid (root container)
+ - /* (all root containers)
+
+Notes:
+ - Cannot mix object and container operations in one rule
+ - Default behavior is Any=false unless 'any' is specified
+ - Use 'all' keyword to explicitly set Any=false`
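
To make the grammar concrete, a short sketch that feeds two rule statements composed from the documented pieces into the parser used by ParseChain above (the statements are illustrative; ParseAPEChain's one-statement-per-string behavior is inferred from the --rule flag usage):

```go
package main

import (
	"fmt"

	apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
)

func main() {
	chain := &apechain.Chain{ID: apechain.ID("demo")}
	rules := []string{
		`allow Object.Get /cid/*`, // reads of any object in a root container
		`deny Object.* RequestCondition:"$Actor:role"=others namespace/*`,
	}
	if err := apeutil.ParseAPEChain(chain, rules); err != nil {
		panic(err)
	}
	fmt.Printf("parsed %d rules\n", len(chain.Rules))
}
```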
diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go
index 9e4fa3098..13f447af4 100644
--- a/cmd/internal/common/exit.go
+++ b/cmd/internal/common/exit.go
@@ -26,13 +26,15 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
_ = iota
internal
aclDenied
+ apemanagerDenied
)
var (
code int
- internalErr = new(sdkstatus.ServerInternal)
- accessErr = new(sdkstatus.ObjectAccessDenied)
+ internalErr = new(sdkstatus.ServerInternal)
+ accessErr = new(sdkstatus.ObjectAccessDenied)
+ apemanagerErr = new(sdkstatus.APEManagerAccessDenied)
)
switch {
@@ -41,13 +43,21 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
case errors.As(err, &accessErr):
code = aclDenied
err = fmt.Errorf("%w: %s", err, accessErr.Reason())
+ case errors.As(err, &apemanagerErr):
+ code = apemanagerDenied
+ err = fmt.Errorf("%w: %s", err, apemanagerErr.Reason())
default:
code = internal
}
cmd.PrintErrln(err)
- if cmd.PersistentPostRun != nil {
- cmd.PersistentPostRun(cmd, nil)
+ for p := cmd; p != nil; p = p.Parent() {
+ if p.PersistentPostRun != nil {
+ p.PersistentPostRun(cmd, nil)
+ if !cobra.EnableTraverseRunHooks {
+ break
+ }
+ }
}
os.Exit(code)
}
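
The loop replacing the single PersistentPostRun call mirrors cobra's own traverse-hooks semantics: run the hook of every ancestor, child first, unless traversal is disabled. A minimal sketch of the behavior it emulates (assuming cobra v1.8+, where EnableTraverseRunHooks exists):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cobra.EnableTraverseRunHooks = true // run post-hooks up the whole parent chain

	root := &cobra.Command{
		Use:               "root",
		PersistentPostRun: func(cmd *cobra.Command, _ []string) { cmd.Println("root cleanup") },
	}
	child := &cobra.Command{
		Use:               "child",
		Run:               func(cmd *cobra.Command, _ []string) { cmd.Println("work") },
		PersistentPostRun: func(cmd *cobra.Command, _ []string) { cmd.Println("child cleanup") },
	}
	root.AddCommand(child)

	root.SetArgs([]string{"child"})
	_ = root.Execute() // prints: work, child cleanup, root cleanup
}
```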
diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go
index 79b03a726..5dd1a060e 100644
--- a/cmd/internal/common/netmap.go
+++ b/cmd/internal/common/netmap.go
@@ -14,28 +14,28 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo,
) {
var strState string
- switch {
+ switch node.Status() {
default:
strState = "STATE_UNSUPPORTED"
- case node.IsOnline():
+ case netmap.Online:
strState = "ONLINE"
- case node.IsOffline():
+ case netmap.Offline:
strState = "OFFLINE"
- case node.IsMaintenance():
+ case netmap.Maintenance:
strState = "MAINTENANCE"
}
cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState)
- netmap.IterateNetworkEndpoints(node, func(endpoint string) {
+ for endpoint := range node.NetworkEndpoints() {
cmd.Printf("%s ", endpoint)
- })
+ }
cmd.Println()
if !short {
- node.IterateAttributes(func(key, value string) {
+ for key, value := range node.Attributes() {
cmd.Printf("%s\t%s: %s\n", indent, key, value)
- })
+ }
}
}
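
The switch from callback iterators to range loops implies the SDK now returns Go 1.23 range-over-func sequences. A self-contained sketch of that shape (the endpoint values are placeholders; only the iter.Seq mechanics are the point):

```go
package main

import (
	"fmt"
	"iter"
)

// endpoints returns an iter.Seq[string] — the shape a method such as
// NetworkEndpoints() needs for `for endpoint := range ...` to compile.
func endpoints() iter.Seq[string] {
	eps := []string{"s01.frostfs.devenv:8080", "s02.frostfs.devenv:8081"}
	return func(yield func(string) bool) {
		for _, e := range eps {
			if !yield(e) { // consumer broke out of the loop
				return
			}
		}
	}
}

func main() {
	for ep := range endpoints() {
		fmt.Println(ep)
	}
}
```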
diff --git a/config/example/ir.env b/config/example/ir.env
index 7234a4b32..c13044a6e 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -1,5 +1,7 @@
FROSTFS_IR_LOGGER_LEVEL=info
FROSTFS_IR_LOGGER_TIMESTAMP=true
+FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph"
+FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug"
FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX
@@ -80,3 +82,12 @@ FROSTFS_IR_PPROF_MUTEX_RATE=10000
FROSTFS_IR_PROMETHEUS_ENABLED=true
FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090
FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s
+
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index 4c64f088b..ed53f014b 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -3,6 +3,9 @@
logger:
level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
timestamp: true
+ tags:
+ - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`.
+ level: debug
wallet:
path: /path/to/wallet.json # Path to NEP-6 NEO wallet file
@@ -123,3 +126,18 @@ prometheus:
systemdnotify:
enabled: true
+
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
diff --git a/config/example/node.env b/config/example/node.env
index 6618a981a..9a2426358 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -1,6 +1,8 @@
FROSTFS_LOGGER_LEVEL=debug
FROSTFS_LOGGER_DESTINATION=journald
FROSTFS_LOGGER_TIMESTAMP=true
+FROSTFS_LOGGER_TAGS_0_NAMES="main, morph"
+FROSTFS_LOGGER_TAGS_0_LEVEL="debug"
FROSTFS_PPROF_ENABLED=true
FROSTFS_PPROF_ADDRESS=localhost:6060
@@ -20,9 +22,9 @@ FROSTFS_NODE_WALLET_PASSWORD=password
FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083"
FROSTFS_NODE_ATTRIBUTE_0=Price:11
FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK"
-FROSTFS_NODE_RELAY=true
FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions
FROSTFS_NODE_PERSISTENT_STATE_PATH=/state
+FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db
# Tree service section
FROSTFS_TREE_ENABLED=true
@@ -31,6 +33,7 @@ FROSTFS_TREE_REPLICATION_CHANNEL_CAPACITY=32
FROSTFS_TREE_REPLICATION_WORKER_COUNT=32
FROSTFS_TREE_REPLICATION_TIMEOUT=5s
FROSTFS_TREE_SYNC_INTERVAL=1h
+FROSTFS_TREE_SYNC_BATCH_SIZE=2000
FROSTFS_TREE_AUTHORIZED_KEYS="0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
# gRPC section
@@ -82,14 +85,20 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10
+# Container service section
+FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
+
# Object service section
-FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
-FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
+FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"
+
+FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
+FROSTFS_RPC_LIMITS_0_MAX_OPS=1000
+FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
+FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
# Storage engine section
-FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard
### Flag to refill Metabase from BlobStor
@@ -114,7 +123,8 @@ FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644
FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100
FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms
### Blobstor config
-FROSTFS_STORAGE_SHARD_0_COMPRESS=true
+FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true
+FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest
FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*"
FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true
FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7
@@ -149,6 +159,54 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
#### Limit of concurrent workers collecting expired objects by the garbage collector
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
+#### Limits config
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100
## 1 shard
### Flag to refill Metabase from BlobStor
@@ -201,8 +259,25 @@ FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
FROSTFS_TRACING_EXPORTER="otlp_grpc"
FROSTFS_TRACING_TRUSTED_CA=""
+FROSTFS_TRACING_ATTRIBUTES_0_KEY=key0
+FROSTFS_TRACING_ATTRIBUTES_0_VALUE=value
+FROSTFS_TRACING_ATTRIBUTES_1_KEY=key1
+FROSTFS_TRACING_ATTRIBUTES_1_VALUE=value
FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
# AUDIT section
FROSTFS_AUDIT_ENABLED=true
+
+# MULTINET section
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
+
+FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
+FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
diff --git a/config/example/node.json b/config/example/node.json
index 0d100ed80..6b7a9c2c6 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -2,7 +2,13 @@
"logger": {
"level": "debug",
"destination": "journald",
- "timestamp": true
+ "timestamp": true,
+ "tags": [
+ {
+ "names": "main, morph",
+ "level": "debug"
+ }
+ ]
},
"pprof": {
"enabled": true,
@@ -31,13 +37,13 @@
],
"attribute_0": "Price:11",
"attribute_1": "UN-LOCODE:RU MSK",
- "relay": true,
"persistent_sessions": {
"path": "/sessions"
},
"persistent_state": {
"path": "/state"
- }
+ },
+ "locode_db_path": "/path/to/locode/db"
},
"grpc": {
"0": {
@@ -69,6 +75,7 @@
"replication_worker_count": 32,
"replication_timeout": "5s",
"sync_interval": "1h",
+ "sync_batch_size": 2000,
"authorized_keys": [
"0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0",
"02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
@@ -123,18 +130,40 @@
"pool_size": 10,
"put_timeout": "15s"
},
+ "container": {
+ "list_stream": {
+ "batch_size": "500"
+ }
+ },
"object": {
"delete": {
"tombstone_lifetime": 10
},
"put": {
- "remote_pool_size": 100,
- "local_pool_size": 200,
"skip_session_token_issuer_verification": true
+ },
+ "get": {
+ "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
}
},
+ "rpc": {
+ "limits": [
+ {
+ "methods": [
+ "/neo.fs.v2.object.ObjectService/PutSingle",
+ "/neo.fs.v2.object.ObjectService/Put"
+ ],
+ "max_ops": 1000
+ },
+ {
+ "methods": [
+ "/neo.fs.v2.object.ObjectService/Get"
+ ],
+ "max_ops": 10000
+ }
+ ]
+ },
"storage": {
- "shard_pool_size": 15,
"shard_ro_error_threshold": 100,
"shard": {
"0": {
@@ -159,12 +188,15 @@
"max_batch_size": 100,
"max_batch_delay": "10ms"
},
- "compress": true,
- "compression_exclude_content_types": [
- "audio/*", "video/*"
- ],
- "compression_estimate_compressibility": true,
- "compression_estimate_compressibility_threshold": 0.7,
+ "compression": {
+ "enabled": true,
+ "level": "fastest",
+ "exclude_content_types": [
+ "audio/*", "video/*"
+ ],
+ "estimate_compressibility": true,
+ "estimate_compressibility_threshold": 0.7
+ },
"small_object_size": 102400,
"blobstor": [
{
@@ -197,6 +229,87 @@
"remover_sleep_interval": "2m",
"expired_collector_batch_size": 1500,
"expired_collector_worker_count": 15
+ },
+ "limits": {
+ "read": {
+ "max_running_ops": 10000,
+ "max_waiting_ops": 1000,
+ "idle_timeout": "30s",
+ "tags": [
+ {
+ "tag": "internal",
+ "weight": 20,
+ "limit_ops": 0,
+ "reserved_ops": 1000
+ },
+ {
+ "tag": "client",
+ "weight": 70,
+ "reserved_ops": 10000
+ },
+ {
+ "tag": "background",
+ "weight": 5,
+ "limit_ops": 10000,
+ "reserved_ops": 0
+ },
+ {
+ "tag": "writecache",
+ "weight": 5,
+ "limit_ops": 25000
+ },
+ {
+ "tag": "policer",
+ "weight": 5,
+ "limit_ops": 25000,
+ "prohibited": true
+ },
+ {
+ "tag": "treesync",
+ "weight": 5,
+ "limit_ops": 25
+ }
+ ]
+ },
+ "write": {
+ "max_running_ops": 1000,
+ "max_waiting_ops": 100,
+ "idle_timeout": "45s",
+ "tags": [
+ {
+ "tag": "internal",
+ "weight": 200,
+ "limit_ops": 0,
+ "reserved_ops": 100
+ },
+ {
+ "tag": "client",
+ "weight": 700,
+ "reserved_ops": 1000
+ },
+ {
+ "tag": "background",
+ "weight": 50,
+ "limit_ops": 1000,
+ "reserved_ops": 0
+ },
+ {
+ "tag": "writecache",
+ "weight": 50,
+ "limit_ops": 2500
+ },
+ {
+ "tag": "policer",
+ "weight": 50,
+ "limit_ops": 2500
+ },
+ {
+ "tag": "treesync",
+ "weight": 50,
+ "limit_ops": 100
+ }
+ ]
+ }
}
},
"1": {
@@ -217,7 +330,9 @@
"max_batch_size": 200,
"max_batch_delay": "20ms"
},
- "compress": false,
+ "compression": {
+ "enabled": false
+ },
"small_object_size": 102400,
"blobstor": [
{
@@ -255,14 +370,60 @@
},
"tracing": {
"enabled": true,
- "endpoint": "localhost:9090",
+ "endpoint": "localhost",
"exporter": "otlp_grpc",
- "trusted_ca": "/etc/ssl/tracing.pem"
+ "trusted_ca": "",
+ "attributes":[
+ {
+ "key": "key0",
+ "value": "value"
+ },
+ {
+ "key": "key1",
+ "value": "value"
+ }
+ ]
},
"runtime": {
"soft_memory_limit": 1073741824
},
"audit": {
"enabled": true
+ },
+ "multinet": {
+ "enabled": true,
+ "subnets": [
+ {
+ "mask": "192.168.219.174/24",
+ "source_ips": [
+ "192.168.218.185",
+ "192.168.219.185"
+ ]
+ },
+ {
+ "mask": "10.78.70.74/24",
+ "source_ips":[
+ "10.78.70.185",
+ "10.78.71.185"
+ ]
+ }
+ ],
+ "balancer": "roundrobin",
+ "restrict": false,
+ "fallback_delay": "350ms"
+ },
+ "qos": {
+ "critical": {
+ "authorized_keys": [
+ "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
+ "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
+ ]
+ },
+ "internal": {
+ "authorized_keys": [
+ "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2",
+ "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
+ ]
+ }
}
}
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 86be35ba8..2d4bc90fb 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -2,6 +2,9 @@ logger:
level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
destination: journald # logger destination: one of "stdout" (default), "journald"
timestamp: true
+ tags:
+ - names: "main, morph"
+ level: debug
systemdnotify:
enabled: true
@@ -31,11 +34,11 @@ node:
- grpcs://localhost:8083
attribute_0: "Price:11"
attribute_1: UN-LOCODE:RU MSK
- relay: true # start Storage node in relay mode without bootstrapping into the Network map
persistent_sessions:
path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions)
persistent_state:
path: /state # path to persistent state file of Storage node
+ "locode_db_path": "/path/to/locode/db"
grpc:
- endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server
@@ -59,6 +62,7 @@ tree:
replication_channel_capacity: 32
replication_timeout: 5s
sync_interval: 1h
+ sync_batch_size: 2000
authorized_keys: # list of hex-encoded public keys that have rights to use the Tree Service with frostfs-cli
- 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0
- 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56
@@ -78,9 +82,11 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
morph:
dial_timeout: 30s # timeout for side chain NEO RPC client connection
- cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
+ cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls).
+ # Negative value disables caching. A zero value sets the default value.
# Default value: block time. It is recommended to have this value less or equal to block time.
# Cached entities: containers, container lists, eACL tables.
+ container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache.
switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node
rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success
- address: wss://rpc1.morph.frostfs.info:40341/ws
@@ -92,6 +98,9 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
ape_chain_cache_size: 100000
+ netmap:
+ candidates:
+ poll_interval: 20s
apiclient:
dial_timeout: 15s # timeout for FrostFS API client connection
@@ -106,17 +115,31 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum amount of concurrent replications
+container:
+ list_stream:
+ batch_size: 500 # maximum number of containers to send via stream at once
+
object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
put:
- remote_pool_size: 100 # number of async workers for remote PUT operations
- local_pool_size: 200 # number of async workers for local PUT operations
skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
+ get:
+ priority: # list of metrics of nodes for prioritization
+ - $attribute:ClusterName
+ - $attribute:UN-LOCODE
+
+rpc:
+ limits:
+ - methods:
+ - /neo.fs.v2.object.ObjectService/PutSingle
+ - /neo.fs.v2.object.ObjectService/Put
+ max_ops: 1000
+ - methods:
+ - /neo.fs.v2.object.ObjectService/Get
+ max_ops: 10000
storage:
- # note: shard configuration can be omitted for relay node (see `node.relay`)
- shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
shard:
@@ -130,7 +153,7 @@ storage:
flush_worker_count: 30 # number of write-cache flusher threads
metabase:
- perm: 0644 # permissions for metabase files(directories: +x for current user and group)
+ perm: 0o644 # permissions for metabase files(directories: +x for current user and group)
max_batch_size: 200
max_batch_delay: 20ms
@@ -138,18 +161,19 @@ storage:
max_batch_delay: 5ms # maximum delay for a batch of operations to be executed
max_batch_size: 100 # maximum amount of operations in a single batch
- compress: false # turn on/off zstd(level 3) compression of stored objects
+ compression:
+ enabled: false # turn on/off zstd compression of stored objects
small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes
blobstor:
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
+ perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
- - perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
+ - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS
gc:
@@ -180,12 +204,14 @@ storage:
max_batch_size: 100
max_batch_delay: 10ms
- compress: true # turn on/off zstd(level 3) compression of stored objects
- compression_exclude_content_types:
- - audio/*
- - video/*
- compression_estimate_compressibility: true
- compression_estimate_compressibility_threshold: 0.7
+ compression:
+ enabled: true # turn on/off zstd compression of stored objects
+ level: fastest
+ exclude_content_types:
+ - audio/*
+ - video/*
+ estimate_compressibility: true
+ estimate_compressibility_threshold: 0.7
blobstor:
- type: blobovnicza
@@ -208,6 +234,59 @@ storage:
expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
+ limits:
+ read:
+ max_running_ops: 10000
+ max_waiting_ops: 1000
+ idle_timeout: 30s
+ tags:
+ - tag: internal
+ weight: 20
+ limit_ops: 0
+ reserved_ops: 1000
+ - tag: client
+ weight: 70
+ reserved_ops: 10000
+ - tag: background
+ weight: 5
+ limit_ops: 10000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 5
+ limit_ops: 25000
+ - tag: policer
+ weight: 5
+ limit_ops: 25000
+ prohibited: true
+ - tag: treesync
+ weight: 5
+ limit_ops: 25
+ write:
+ max_running_ops: 1000
+ max_waiting_ops: 100
+ idle_timeout: 45s
+ tags:
+ - tag: internal
+ weight: 200
+ limit_ops: 0
+ reserved_ops: 100
+ - tag: client
+ weight: 700
+ reserved_ops: 1000
+ - tag: background
+ weight: 50
+ limit_ops: 1000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 50
+ limit_ops: 2500
+ - tag: policer
+ weight: 50
+ limit_ops: 2500
+ - tag: treesync
+ weight: 50
+ limit_ops: 100
+
1:
writecache:
path: tmp/1/cache # write-cache root directory
@@ -226,16 +305,46 @@ storage:
pilorama:
path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
- perm: 0644 # permission to use for the database file and intermediate directories
+ perm: 0o644 # permission to use for the database file and intermediate directories
tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost"
trusted_ca: ""
+ attributes:
+ - key: key0
+ value: value
+ - key: key1
+ value: value
runtime:
soft_memory_limit: 1gb
audit:
enabled: true
+
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
+
+qos:
+ critical:
+ authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag
+ - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
+ - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
+ internal:
+ authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag
+ - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
+ - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a
diff --git a/config/mainnet/README.md b/config/mainnet/README.md
deleted file mode 100644
index 717a9b0ff..000000000
--- a/config/mainnet/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# N3 Mainnet Storage node configuration
-
-Here is a template for simple storage node configuration in N3 Mainnet.
-Make sure to specify correct values instead of `<...>` placeholders.
-Do not change `contracts` section. Run the latest frostfs-node release with
-the fixed config `frostfs-node -c config.yml`
-
-To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract.
-The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
-(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`)
-
-## Tips
-
-Use `grpcs://` scheme in the announced address if you enable TLS in grpc server.
-```yaml
-node:
- addresses:
- - grpcs://frostfs.my.org:8080
-
-grpc:
- num: 1
- 0:
- endpoint: frostfs.my.org:8080
- tls:
- enabled: true
- certificate: /path/to/cert
- key: /path/to/key
-```
diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml
deleted file mode 100644
index d86ea451f..000000000
--- a/config/mainnet/config.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-node:
- wallet:
- path:
- address:
- password:
- addresses:
- -
- attribute_0: UN-LOCODE:
- attribute_1: Price:100000
- attribute_2: User-Agent:FrostFS\/0.9999
-
-grpc:
- num: 1
- 0:
- endpoint:
- tls:
- enabled: false
-
-storage:
- shard_num: 1
- shard:
- 0:
- metabase:
- path: /storage/path/metabase
- perm: 0600
- blobstor:
- - path: /storage/path/blobovnicza
- type: blobovnicza
- perm: 0600
- opened_cache_capacity: 32
- depth: 1
- width: 1
- - path: /storage/path/fstree
- type: fstree
- perm: 0600
- depth: 4
- writecache:
- enabled: false
- gc:
- remover_batch_size: 100
- remover_sleep_interval: 1m
-
-logger:
- level: info
-
-prometheus:
- enabled: true
- address: localhost:9090
- shutdown_timeout: 15s
-
-object:
- put:
- remote_pool_size: 100
- local_pool_size: 100
-
-morph:
- rpc_endpoint:
- - wss://rpc1.morph.frostfs.info:40341/ws
- - wss://rpc2.morph.frostfs.info:40341/ws
- - wss://rpc3.morph.frostfs.info:40341/ws
- - wss://rpc4.morph.frostfs.info:40341/ws
- - wss://rpc5.morph.frostfs.info:40341/ws
- - wss://rpc6.morph.frostfs.info:40341/ws
- - wss://rpc7.morph.frostfs.info:40341/ws
- dial_timeout: 20s
-
-contracts:
- balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
- container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
- netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
diff --git a/config/testnet/README.md b/config/testnet/README.md
deleted file mode 100644
index e2cda33ec..000000000
--- a/config/testnet/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# N3 Testnet Storage node configuration
-
-There is a prepared configuration for NeoFS Storage Node deployment in
-N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
-docker image and run it with docker-compose.
-
-## Build image
-
-Prepared **frostfs-storage-testnet** image is available at Docker Hub.
-However, if you need to rebuild it for some reason, run
-`make image-storage-testnet` command.
-
-```
-$ make image-storage-testnet
-...
-Successfully built ab0557117b02
-Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
-```
-
-## Deploy node
-
-To run a storage node in N3 Testnet environment, you should deposit GAS assets,
-update docker-compose file and start the node.
-
-### Deposit
-
-The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a
-bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx.
-
-First, obtain GAS in N3 Testnet chain. You can do that with
-[faucet](https://neowish.ngd.network) service.
-
-Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet.
-You can provide scripthash in the `data` argument of transfer tx to make a
-deposit to a specified account. Otherwise, deposit is made to the tx sender.
-
-NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
-so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
-
-See a deposit example with `neo-go`.
-
-```
-neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
---from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
---to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
---token GAS \
---amount 1
-```
-
-### Configure
-
-Next, configure `node_config.env` file. Change endpoints values. Both
-should contain your **public** IP.
-
-```
-NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
-NEOFS_NODE_ADDRESSES=65.52.183.157:36512
-```
-
-Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
-attribute.
-
-```
-NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
-NEOFS_NODE_ADDRESSES=65.52.183.157:36512
-NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
-```
-
-You can validate UN/LOCODE attribute in
-[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
-with frostfs-cli.
-
-```
-$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
-Country: Russia
-Location: Saint Petersburg (ex Leningrad)
-Continent: Europe
-Subdivision: [SPE] Sankt-Peterburg
-Coordinates: 59.53, 30.15
-```
-
-It is recommended to pass the node's key as a file. To do so, convert your wallet
-WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file.
-
-```
-// Print WIF in a 32-byte hex format
-$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
-PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
-PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
-WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
-Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
-ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
-ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
-
-// Save 32-byte hex into a file
-$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
-```
-
-Then, specify the path to this file in `docker-compose.yml`
-```yaml
- volumes:
- - frostfs_storage:/storage
- - ./my_wallet.key:/node.key
-```
-
-
-NeoFS objects will be stored on your machine. By default, docker-compose
-is configured to store objects in named docker volume `frostfs_storage`. You can
-specify a directory on the filesystem to store objects there.
-
-```yaml
- volumes:
- - /home/username/frostfs/rc3/storage:/storage
- - ./my_wallet.key:/node.key
-```
-
-### Start
-
-Run the node with `docker-compose up` command and stop it with `docker-compose down`.
-
-### Debug
-
-To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
-log, set up log level to debug with this env:
-
-```yaml
- environment:
- - NEOFS_LOGGER_LEVEL=debug
-```
diff --git a/config/testnet/config.yml b/config/testnet/config.yml
deleted file mode 100644
index 76b36cdf6..000000000
--- a/config/testnet/config.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-logger:
- level: info
-
-morph:
- rpc_endpoint:
- - wss://rpc01.morph.testnet.frostfs.info:51331/ws
- - wss://rpc02.morph.testnet.frostfs.info:51331/ws
- - wss://rpc03.morph.testnet.frostfs.info:51331/ws
- - wss://rpc04.morph.testnet.frostfs.info:51331/ws
- - wss://rpc05.morph.testnet.frostfs.info:51331/ws
- - wss://rpc06.morph.testnet.frostfs.info:51331/ws
- - wss://rpc07.morph.testnet.frostfs.info:51331/ws
- dial_timeout: 20s
-
-contracts:
- balance: e0420c216003747626670d1424569c17c79015bf
- container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
- netmap: d4b331639799e2958d4bc5b711b469d79de94e01
-
-node:
- key: /node.key
- attribute_0: Deployed:SelfHosted
- attribute_1: User-Agent:FrostFS\/0.9999
-
-prometheus:
- enabled: true
- address: localhost:9090
- shutdown_timeout: 15s
-
-storage:
- shard_num: 1
- shard:
- 0:
- metabase:
- path: /storage/metabase
- perm: 0777
- blobstor:
- - path: /storage/path/blobovnicza
- type: blobovnicza
- perm: 0600
- opened_cache_capacity: 32
- depth: 1
- width: 1
- - path: /storage/path/fstree
- type: fstree
- perm: 0600
- depth: 4
- writecache:
- enabled: false
- gc:
- remover_batch_size: 100
- remover_sleep_interval: 1m
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 47328c419..000000000
--- a/debian/changelog
+++ /dev/null
@@ -1,5 +0,0 @@
-frostfs-node (0.0.1) stable; urgency=medium
-
- * Initial package build
-
- -- TrueCloudLab <tech@frostfs.info> Tue, 25 Oct 2022 21:10:49 +0300
diff --git a/debian/clean b/debian/clean
deleted file mode 100644
index 44dc05e0a..000000000
--- a/debian/clean
+++ /dev/null
@@ -1,2 +0,0 @@
-man/
-debian/*.bash-completion
diff --git a/debian/control b/debian/control
deleted file mode 100644
index f3f214bca..000000000
--- a/debian/control
+++ /dev/null
@@ -1,39 +0,0 @@
-Source: frostfs-node
-Section: misc
-Priority: optional
-Maintainer: TrueCloudLab <tech@frostfs.info>
-Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts
-Standards-Version: 4.5.1
-Homepage: https://fs.neo.org/
-Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-node.git
-Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-node
-
-Package: frostfs-storage
-Architecture: any
-Depends: ${misc:Depends}
-Description: FrostFS Storage node
- FrostFS is a decentralized distributed object storage integrated with the NEO
- Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
- of storing and distributing user's data. Any Neo user may participate in the
- network and get paid for providing storage resources to other users or store
- their data in FrostFS and pay a competitive price for it.
-
-Package: frostfs-ir
-Architecture: any
-Depends: ${misc:Depends}, frostfs-locode-db
-Description: FrostFS InnerRing node
- FrostFS is a decentralized distributed object storage integrated with the NEO
- Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
- of storing and distributing user's data. Any Neo user may participate in the
- network and get paid for providing storage resources to other users or store
- their data in FrostFS and pay a competitive price for it.
-
-Package: frostfs-cli
-Architecture: any
-Depends: ${misc:Depends}
-Description: CLI tools for FrostFS
- FrostFS is a decentralized distributed object storage integrated with the NEO
- Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
- of storing and distributing user's data. Any Neo user may participate in the
- network and get paid for providing storage resources to other users or store
- their data in FrostFS and pay a competitive price for it.
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index 61dab665d..000000000
--- a/debian/copyright
+++ /dev/null
@@ -1,23 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: frostfs-node
-Upstream-Contact: tech@frostfs.info
-Source: https://git.frostfs.info/TrueCloudLab/frostfs-node
-
-Files: *
-Copyright: 2022-2023 TrueCloudLab (@TrueCloudLab), contributors of FrostFS project
- 2018-2022 NeoSPCC (@nspcc-dev), contributors of NeoFS project
- (https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/CREDITS.md)
-
-License: GPL-3
- This program is free software: you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published
- by the Free Software Foundation; version 3.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program or at /usr/share/common-licenses/GPL-3
- If not, see <https://www.gnu.org/licenses/>.
diff --git a/debian/frostfs-cli.docs b/debian/frostfs-cli.docs
deleted file mode 100644
index 58d4559cc..000000000
--- a/debian/frostfs-cli.docs
+++ /dev/null
@@ -1,4 +0,0 @@
-CONTRIBUTING.md
-CREDITS.md
-README.md
-cmd/frostfs-adm/docs
diff --git a/debian/frostfs-cli.install b/debian/frostfs-cli.install
deleted file mode 100644
index 93025187b..000000000
--- a/debian/frostfs-cli.install
+++ /dev/null
@@ -1,3 +0,0 @@
-bin/frostfs-adm usr/bin
-bin/frostfs-cli usr/bin
-bin/frostfs-lens usr/bin
diff --git a/debian/frostfs-cli.manpages b/debian/frostfs-cli.manpages
deleted file mode 100644
index 85c5e001d..000000000
--- a/debian/frostfs-cli.manpages
+++ /dev/null
@@ -1 +0,0 @@
-man/*
diff --git a/debian/frostfs-ir.dirs b/debian/frostfs-ir.dirs
deleted file mode 100644
index 90da8fd27..000000000
--- a/debian/frostfs-ir.dirs
+++ /dev/null
@@ -1,2 +0,0 @@
-/etc/frostfs/ir
-/var/lib/frostfs/ir
diff --git a/debian/frostfs-ir.docs b/debian/frostfs-ir.docs
deleted file mode 100644
index 38b0cef26..000000000
--- a/debian/frostfs-ir.docs
+++ /dev/null
@@ -1,3 +0,0 @@
-CONTRIBUTING.md
-CREDITS.md
-README.md
diff --git a/debian/frostfs-ir.install b/debian/frostfs-ir.install
deleted file mode 100644
index e052f5434..000000000
--- a/debian/frostfs-ir.install
+++ /dev/null
@@ -1 +0,0 @@
-bin/frostfs-ir usr/bin
diff --git a/debian/frostfs-ir.postinst b/debian/frostfs-ir.postinst
deleted file mode 100755
index eb9d381c9..000000000
--- a/debian/frostfs-ir.postinst
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `configure'
-# * `abort-upgrade'
-# * `abort-remove' `in-favour'
-#
-# * `abort-remove'
-# * `abort-deconfigure' `in-favour'
-# `removing'
-#
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-case "$1" in
- configure)
- USERNAME=ir
- id -u frostfs-ir >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/ir --system -M -U -c "FrostFS InnerRing node" frostfs-ir
- if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
- chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
- chmod -f 0750 /etc/frostfs/$USERNAME
- chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml
- chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml
- chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
- chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
- fi
- USERDIR="$(getent passwd frostfs-$USERNAME | cut -d: -f6)"
- if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
- chown -f frostfs-$USERNAME: "$USERDIR"
- fi
- ;;
-
- abort-upgrade|abort-remove|abort-deconfigure)
- ;;
-
- *)
- echo "postinst called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-ir.postrm b/debian/frostfs-ir.postrm
deleted file mode 100755
index cbb7db2f2..000000000
--- a/debian/frostfs-ir.postrm
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `remove'
-# * `purge'
-# * `upgrade'
-# * `failed-upgrade'
-# * `abort-install'
-# * `abort-install'
-# * `abort-upgrade'
-# * `disappear'
-#
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-
-case "$1" in
- purge)
- rm -rf /var/lib/frostfs/ir/*
- ;;
-
- remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
- ;;
-
- *)
- echo "postrm called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-ir.preinst b/debian/frostfs-ir.preinst
deleted file mode 100755
index 37f952537..000000000
--- a/debian/frostfs-ir.preinst
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `install'
-# * `install'
-# * `upgrade'
-# * `abort-upgrade'
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-
-case "$1" in
- install|upgrade)
- ;;
-
- abort-upgrade)
- ;;
-
- *)
- echo "preinst called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-ir.prerm b/debian/frostfs-ir.prerm
deleted file mode 100755
index 0da369d75..000000000
--- a/debian/frostfs-ir.prerm
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `remove'
-# * `upgrade'
-# * `failed-upgrade'
-# * `remove' `in-favour'
-# * `deconfigure' `in-favour'
-# `removing'
-#
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-
-case "$1" in
- remove|upgrade|deconfigure)
- ;;
-
- failed-upgrade)
- ;;
-
- *)
- echo "prerm called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-ir.service b/debian/frostfs-ir.service
deleted file mode 100644
index 304017f68..000000000
--- a/debian/frostfs-ir.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=FrostFS InnerRing node
-Requires=network.target
-
-[Service]
-Type=notify
-NotifyAccess=all
-ExecStart=/usr/bin/frostfs-ir --config /etc/frostfs/ir/config.yml
-User=frostfs-ir
-Group=frostfs-ir
-WorkingDirectory=/var/lib/frostfs/ir
-Restart=always
-RestartSec=5
-PrivateTmp=true
-
-[Install]
-WantedBy=multi-user.target
diff --git a/debian/frostfs-storage.dirs b/debian/frostfs-storage.dirs
deleted file mode 100644
index 4142145ee..000000000
--- a/debian/frostfs-storage.dirs
+++ /dev/null
@@ -1,3 +0,0 @@
-/etc/frostfs/storage
-/srv/frostfs
-/var/lib/frostfs/storage
diff --git a/debian/frostfs-storage.docs b/debian/frostfs-storage.docs
deleted file mode 100644
index cd1f5f23f..000000000
--- a/debian/frostfs-storage.docs
+++ /dev/null
@@ -1,4 +0,0 @@
-docs/storage-node-configuration.md
-CONTRIBUTING.md
-CREDITS.md
-README.md
diff --git a/debian/frostfs-storage.install b/debian/frostfs-storage.install
deleted file mode 100644
index 670935e7b..000000000
--- a/debian/frostfs-storage.install
+++ /dev/null
@@ -1 +0,0 @@
-bin/frostfs-node usr/bin
diff --git a/debian/frostfs-storage.postinst b/debian/frostfs-storage.postinst
deleted file mode 100755
index 88fa53be5..000000000
--- a/debian/frostfs-storage.postinst
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `configure'
-# * `abort-upgrade'
-# * `abort-remove' `in-favour'
-#
-# * `abort-remove'
-# * `abort-deconfigure' `in-favour'
-# `removing'
-#
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-case "$1" in
- configure)
- USERNAME=storage
- id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -M -U -c "FrostFS Storage node" frostfs-$USERNAME
- if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
- chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
- chmod -f 0750 /etc/frostfs/$USERNAME
- chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml
- chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml
- chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
- chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
- fi
- USERDIR=$(getent passwd frostfs-$USERNAME | cut -d: -f6)
- if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
- chown -f frostfs-$USERNAME: "$USERDIR"
- fi
- USERDIR=/srv/frostfs
- if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then
- chown -f frostfs-$USERNAME: $USERDIR
- fi
- ;;
-
- abort-upgrade|abort-remove|abort-deconfigure)
- ;;
-
- *)
- echo "postinst called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-storage.postrm b/debian/frostfs-storage.postrm
deleted file mode 100755
index d9c8c9656..000000000
--- a/debian/frostfs-storage.postrm
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `remove'
-# * `purge'
-# * `upgrade'
-# * `failed-upgrade'
-# * `abort-install'
-# * `abort-install'
-# * `abort-upgrade'
-# * `disappear'
-#
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-
-case "$1" in
- purge)
- rm -rf /var/lib/frostfs/storage/*
- ;;
-
- remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
- ;;
-
- *)
- echo "postrm called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-storage.preinst b/debian/frostfs-storage.preinst
deleted file mode 100755
index 37f952537..000000000
--- a/debian/frostfs-storage.preinst
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `install'
-# * `install'
-# * `upgrade'
-# * `abort-upgrade'
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-
-case "$1" in
- install|upgrade)
- ;;
-
- abort-upgrade)
- ;;
-
- *)
- echo "preinst called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-storage.prerm b/debian/frostfs-storage.prerm
deleted file mode 100755
index 0da369d75..000000000
--- a/debian/frostfs-storage.prerm
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * `remove'
-# * `upgrade'
-# * `failed-upgrade'
-# * `remove' `in-favour'
-# * `deconfigure' `in-favour'
-# `removing'
-#
-# for details, see https://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-
-case "$1" in
- remove|upgrade|deconfigure)
- ;;
-
- failed-upgrade)
- ;;
-
- *)
- echo "prerm called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/frostfs-storage.service b/debian/frostfs-storage.service
deleted file mode 100644
index 573961756..000000000
--- a/debian/frostfs-storage.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=FrostFS Storage node
-Requires=network.target
-
-[Service]
-Type=notify
-NotifyAccess=all
-ExecStart=/usr/bin/frostfs-node --config /etc/frostfs/storage/config.yml
-User=frostfs-storage
-Group=frostfs-storage
-WorkingDirectory=/srv/frostfs
-Restart=always
-RestartSec=5
-PrivateTmp=true
-
-[Install]
-WantedBy=multi-user.target
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 0dd8ee399..000000000
--- a/debian/rules
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/make -f
-
-# Do not try to strip Go binaries
-export DEB_BUILD_OPTIONS := nostrip
-
-%:
- dh $@ --with bash-completion
-
-override_dh_auto_test:
-
-override_dh_auto_install:
- echo $(DEB_BUILD_OPTIONS)
- dh_auto_install
-
- bin/frostfs-adm gendoc --type man man/
- bin/frostfs-cli gendoc --type man man/
-
- bin/frostfs-adm completion bash > debian/frostfs-adm.bash-completion
- bin/frostfs-cli completion bash > debian/frostfs-cli.bash-completion
- install -m 0755 -d debian/frostfs-cli/usr/share/fish/completions/
- install -m 0755 -d debian/frostfs-cli/usr/share/zsh/vendor-completions/
- bin/frostfs-adm completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-adm.fish
- bin/frostfs-adm completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-adm
- bin/frostfs-cli completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-cli.fish
- bin/frostfs-cli completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-cli
-
- install -T -m 0640 config/example/ir.yaml debian/frostfs-ir/etc/frostfs/ir/config.yml
- install -T -m 0640 config/example/ir-control.yaml debian/frostfs-ir/etc/frostfs/ir/control.yml
- install -T -m 0640 config/example/node.yaml debian/frostfs-storage/etc/frostfs/storage/config.yml
- install -T -m 0640 config/example/node-control.yaml debian/frostfs-storage/etc/frostfs/storage/control.yml
-
-override_dh_installsystemd:
- dh_installsystemd --no-enable --no-start --name=frostfs-ir
- dh_installsystemd --no-enable --no-start --name=frostfs-storage
-
-override_dh_installchangelogs:
- dh_installchangelogs -k CHANGELOG.md
-
-override_dh_installdocs:
- dh_installdocs
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8d8..000000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json
index 990fd42a8..b68ce4fa3 100644
--- a/dev/.vscode-example/launch.json
+++ b/dev/.vscode-example/launch.json
@@ -42,7 +42,6 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
@@ -78,7 +77,12 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9090",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
+ "FROSTFS_TRACING_ENABLED":"true",
+ "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
+ "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
+ "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
+ "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8080"
},
"postDebugTask": "env-down"
},
@@ -93,7 +97,6 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
@@ -129,7 +132,12 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9091",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
+ "FROSTFS_TRACING_ENABLED":"true",
+ "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
+ "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
+ "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
+ "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8082"
},
"postDebugTask": "env-down"
},
@@ -144,7 +152,6 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
@@ -180,7 +187,12 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9092",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
+ "FROSTFS_TRACING_ENABLED":"true",
+ "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
+ "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
+ "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
+ "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8084"
},
"postDebugTask": "env-down"
},
@@ -195,7 +207,6 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",
@@ -231,7 +242,12 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9093",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
+ "FROSTFS_TRACING_ENABLED":"true",
+ "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
+ "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
+ "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
+ "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8086"
},
"postDebugTask": "env-down"
}
diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml
index 9d026797c..40ed35aeb 100644
--- a/dev/docker-compose.yml
+++ b/dev/docker-compose.yml
@@ -3,7 +3,7 @@
version: "2.4"
services:
neo-go:
- image: nspccdev/neo-go:0.105.0
+ image: nspccdev/neo-go:0.106.0
container_name: neo-go
command: ["node", "--config-path", "/config", "--privnet", "--debug"]
stop_signal: SIGKILL
@@ -14,3 +14,15 @@ services:
- ./neo-go/node-wallet.json:/wallets/node-wallet.json
- ./neo-go/config.yml:/wallets/config.yml
- ./neo-go/wallet.json:/wallets/wallet.json
+ jaeger:
+ image: jaegertracing/all-in-one:latest
+ container_name: jaeger
+ ports:
+ - '4317:4317' # OTLP over gRPC
+ - '4318:4318' # OTLP over HTTP
+ - '16686:16686' # frontend
+ stop_signal: SIGKILL
+ environment:
+ - COLLECTOR_OTLP_ENABLED=true
+ - SPAN_STORAGE_TYPE=badger
+ - BADGER_EPHEMERAL=true
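+ # Illustrative local usage: run `docker compose -f dev/docker-compose.yml up -d jaeger`,
+ # send traces via OTLP gRPC to 127.0.0.1:4317 (the FROSTFS_TRACING_ENDPOINT used in
+ # dev/.vscode-example/launch.json), and browse them at http://127.0.0.1:16686.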
diff --git a/docs/building-deb-package.md b/docs/building-deb-package.md
deleted file mode 100644
index 26a77a27f..000000000
--- a/docs/building-deb-package.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Building Debian package on host
-
-## Prerequisites
-
-For now, we're assuming building for Debian 11 (stable) x86_64.
-
-Go version 18.4 or later should already be installed, i.e. this runs
-successfully:
-
-* `make all`
-
-## Installing packaging dependencies
-
-```shell
-$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
-```
-
-Warining: number of package installed is pretty large considering dependecies.
-
-## Package building
-
-```shell
-$ make debpackage
-```
-
-## Leftovers cleaning
-
-```shell
-$ make debclean
-```
-or
-```shell
-$ dh clean
-```
-
-# Package versioning
-
-By default, package version is based on product version and may also contain git
-tags and hashes.
-
-Package version could be overwritten by setting `PKG_VERSION` variable before
-build, Debian package versioning rules should be respected.
-
-```shell
-$ PKG_VERSION=0.32.0 make debpackge
-```
diff --git a/docs/evacuation.md b/docs/evacuation.md
index 885ce169a..d47d56d15 100644
--- a/docs/evacuation.md
+++ b/docs/evacuation.md
@@ -20,7 +20,12 @@ Because it is necessary to prevent removing by policer objects with policy `REP
## Commands
-`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag. By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`).
+`frostfs-cli control shards evacuation start` starts the evacuation process for the specified shards. To start evacuating all node shards, use the `--all` flag.
+By default, both objects and trees are evacuated. To limit the evacuation scope, use the `--scope` flag (possible values are `all`, `trees`, `objects`).
+To evacuate objects only from containers with policy `REP 1`, use the `--rep-one-only` option.
+To adjust the resource consumption of the evacuation, use the following options (see the example below):
+ - `--container-worker-count` sets the number of concurrent container evacuation workers
+ - `--object-worker-count` sets the number of concurrent object evacuation workers
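+
+For example, a possible invocation that evacuates only objects from all shards with tuned worker counts (the control endpoint address and the counts are illustrative):
+
+```shell
+frostfs-cli control shards evacuation start --endpoint localhost:8091 \
+  --all --scope objects \
+  --container-worker-count 10 --object-worker-count 30
+```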
`frostfs-cli control shards evacuation stop` stops running evacuation process.
diff --git a/docs/release-instruction.md b/docs/release-instruction.md
index 3aebc8e66..aa867e83c 100644
--- a/docs/release-instruction.md
+++ b/docs/release-instruction.md
@@ -43,11 +43,6 @@ Write new revision number into the root `VERSION` file:
$ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION
```
-Update version in Debian package changelog file
-```
-$ cat debian/changelog
-```
-
Update the supported version of `TrueCloudLab/frostfs-contract` module in root
`README.md` if needed.
@@ -60,7 +55,7 @@ Add an entry to the `CHANGELOG.md` following the style established there.
* update `Unreleased...new` and `new...old` diff-links at the bottom of the file
* add optional codename and release date in the heading
* remove all empty sections such as `Added`, `Removed`, etc.
-* make sure all changes have references to GitHub issues in `#123` format (if possible)
+* make sure all changes have references to relevant issues in `#123` format (if possible)
* clean up all `Unreleased` sections and leave them empty
### Make release commit
@@ -100,24 +95,20 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release
-### Prepare and push images to a Docker Hub (if not automated)
+### Prepare and push images to a Docker registry (automated)
-Create Docker images for all applications and push them into Docker Hub
-(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
+Create Docker images for all applications and push them into container registry
+(executed automatically in Forgejo Actions upon pushing a release tag):
```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images
-$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
+$ make push-images
```
-### Make a proper GitHub release (if not automated)
+### Make a proper release (if not automated)
-Edit an automatically-created release on GitHub, copy things from `CHANGELOG.md`.
+Edit an automatically-created release on git.frostfs.info, copy things from `CHANGELOG.md`.
Build and tar release binaries with `make prepare-release`, attach them to
the release. Publish the release.
@@ -126,7 +117,7 @@ the release. Publish the release.
Prepare pull-request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
with new versions.
-### Close GitHub milestone
+### Close milestone
Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
diff --git a/docs/shard-modes.md b/docs/shard-modes.md
index 3b459335b..6cc4ab13c 100644
--- a/docs/shard-modes.md
+++ b/docs/shard-modes.md
@@ -51,10 +51,7 @@ However, all mode changing operations are idempotent.
## Automatic mode changes
-Shard can automatically switch to a `degraded-read-only` mode in 3 cases:
-1. If the metabase was not available or couldn't be opened/initialized during shard startup.
-2. If shard error counter exceeds threshold.
-3. If the metabase couldn't be reopened during SIGHUP handling.
+A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
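+
+A minimal sketch of the corresponding setting (see `shard_ro_error_threshold` in [storage-node-configuration.md](./storage-node-configuration.md)):
+
+```yaml
+storage:
+  # switch shards to read-only after this many storage errors (default: 0)
+  shard_ro_error_threshold: 100
+```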
# Detach shard
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index c74695e2b..da9fdfed0 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -12,21 +12,23 @@ There are some custom types used for brevity:
# Structure
-| Section | Description |
-|------------------------|---------------------------------------------------------------------|
-| `logger` | [Logging parameters](#logger-section) |
-| `pprof` | [PProf configuration](#pprof-section) |
-| `prometheus` | [Prometheus metrics configuration](#prometheus-section) |
-| `control` | [Control service configuration](#control-section) |
-| `contracts` | [Override FrostFS contracts hashes](#contracts-section) |
-| `morph` | [N3 blockchain client configuration](#morph-section) |
-| `apiclient` | [FrostFS API client configuration](#apiclient-section) |
-| `policer` | [Policer service configuration](#policer-section) |
-| `replicator` | [Replicator service configuration](#replicator-section) |
-| `storage` | [Storage engine configuration](#storage-section) |
-| `runtime` | [Runtime configuration](#runtime-section) |
-| `audit` | [Audit configuration](#audit-section) |
-
+| Section | Description |
+|--------------|---------------------------------------------------------|
+| `node` | [Node parameters](#node-section) |
+| `logger` | [Logging parameters](#logger-section) |
+| `pprof` | [PProf configuration](#pprof-section) |
+| `prometheus` | [Prometheus metrics configuration](#prometheus-section) |
+| `control` | [Control service configuration](#control-section) |
+| `contracts` | [Override FrostFS contracts hashes](#contracts-section) |
+| `morph` | [N3 blockchain client configuration](#morph-section) |
+| `apiclient` | [FrostFS API client configuration](#apiclient-section) |
+| `policer` | [Policer service configuration](#policer-section) |
+| `replicator` | [Replicator service configuration](#replicator-section) |
+| `storage` | [Storage engine configuration](#storage-section) |
+| `runtime` | [Runtime configuration](#runtime-section) |
+| `audit` | [Audit configuration](#audit-section) |
+| `multinet` | [Multinet configuration](#multinet-section) |
+| `qos` | [QoS configuration](#qos-section) |
# `control` section
```yaml
@@ -110,11 +112,21 @@ Contains logger parameters.
```yaml
logger:
level: info
+ tags:
+ - names: "main, morph"
+ level: debug
```
-| Parameter | Type | Default value | Description |
-|-----------|----------|---------------|---------------------------------------------------------------------------------------------------|
-| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
+| Parameter | Type | Default value | Description |
+|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------|
+| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
+| `tags` | list of [tag descriptions](#tags-subsection) | | Array of tag descriptions. |
+
+## `tags` subsection
+| Parameter | Type | Default value | Description |
+|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `names` | `string` | | List of components separated by `,`.
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. |
+| `level` | `string` | | Logging level for the components listed in `names`; overrides the default logging level. |
# `contracts` section
Contains override values for FrostFS side-chain contract hashes. Most of the time contract
@@ -147,15 +159,19 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
+ netmap:
+ candidates:
+ poll_interval: 20s
```
-| Parameter | Type | Default value | Description |
-| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
-| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. |
-| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
-| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
-| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
+| Parameter | Type | Default value | Description |
+|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
+| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. |
+| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
+| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
+| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
+| `netmap.candidates.poll_interval` | `duration` | `20s` | Polling interval that defines how often netmap candidates are merged with the netmap in the local cache. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@@ -169,7 +185,6 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
-| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
@@ -180,20 +195,41 @@ Contains configuration for each shard. Keys must be consecutive numbers starting
`default` subsection has the same format and specifies defaults for missing values.
The following table describes configuration for each shard.
-| Parameter | Type | Default value | Description |
-| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `compress` | `bool` | `false` | Flag to enable compression. |
-| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
-| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. |
-| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. |
-| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
-| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
-| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
-| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
-| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
-| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
-| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
-| `gc` | [GC config](#gc-subsection) | | GC configuration. |
+| Parameter | Type | Default value | Description |
+| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- |
+| `compression` | [Compression config](#compression-subsection) | | Compression configuration. |
+| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
+| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
+| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
+| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
+| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
+| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
+| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
+| `gc` | [GC config](#gc-subsection) | | GC configuration. |
+| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
+
+### `compression` subsection
+
+Contains compression config.
+
+```yaml
+compression:
+ enabled: true
+ level: smallest_size
+ exclude_content_types:
+ - audio/*
+ - video/*
+ estimate_compressibility: true
+ estimate_compressibility_threshold: 0.7
+```
+
+| Parameter | Type | Default value | Description |
+| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | `false` | Flag to enable compression. |
+| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. |
+| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
+| `estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress the data. |
+| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data will be compressed if the estimate is greater than this value. |
### `blobstor` subsection
@@ -208,7 +244,7 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
- perm: 0644
+ perm: 0o644
size: 4194304
depth: 1
width: 4
@@ -268,7 +304,7 @@ gc:
```yaml
metabase:
path: /path/to/meta.db
- perm: 0644
+ perm: 0o644
max_batch_size: 200
max_batch_delay: 20ms
```
@@ -300,6 +336,65 @@ writecache:
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
+### `limits` subsection
+
+```yaml
+limits:
+ max_read_running_ops: 10000
+ max_read_waiting_ops: 1000
+ max_write_running_ops: 1000
+ max_write_waiting_ops: 100
+ read:
+ - tag: internal
+ weight: 20
+ limit_ops: 0
+ reserved_ops: 1000
+ - tag: client
+ weight: 70
+ reserved_ops: 10000
+ - tag: background
+ weight: 5
+ limit_ops: 10000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 5
+ limit_ops: 25000
+ - tag: policer
+ weight: 5
+ limit_ops: 25000
+ write:
+ - tag: internal
+ weight: 200
+ limit_ops: 0
+ reserved_ops: 100
+ - tag: client
+ weight: 700
+ reserved_ops: 1000
+ - tag: background
+ weight: 50
+ limit_ops: 1000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 50
+ limit_ops: 2500
+ - tag: policer
+ weight: 50
+ limit_ops: 2500
+```
+
+| Parameter | Type | Default value | Description |
+| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
+| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. |
+| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
+| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
+| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
+| `read` | `[]tag` | empty | Array of shard read settings for tags. |
+| `write` | `[]tag` | empty | Array of shard write settings for tags. |
+| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
+| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. |
+| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
+| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
+| `tag.prohibited` | `bool` | false | If `true`, operations with the specified tag are prohibited. |
# `node` section
@@ -315,22 +410,22 @@ node:
- "Price:11"
- "UN-LOCODE:RU MSK"
- "key:value"
- relay: false
persistent_sessions:
path: /sessions
persistent_state:
path: /state
+ locode_db_path: "/path/to/locode/db"
```
-| Parameter | Type | Default value | Description |
-|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------|
-| `key` | `string` | | Path to the binary-encoded private key. |
-| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
-| `addresses` | `[]string` | | Addresses advertised in the netmap. |
-| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
-| `relay` | `bool` | | Enable relay mode. |
-| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
-| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
+| Parameter | Type | Default value | Description |
+|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------|
+| `key` | `string` | | Path to the binary-encoded private key. |
+| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
+| `addresses` | `[]string` | | Addresses advertised in the netmap. |
+| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
+| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
+| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
+| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. |
## `wallet` subsection
N3 wallet configuration.
@@ -395,25 +490,46 @@ replicator:
pool_size: 10
```
-| Parameter | Type | Default value | Description |
-|---------------|------------|----------------------------------------|---------------------------------------------|
-| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
-| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. |
+| Parameter | Type | Default value | Description |
+|---------------|------------|---------------|---------------------------------------------|
+| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
+| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. |
# `object` section
Contains object-service related parameters.
```yaml
object:
- put:
- remote_pool_size: 100
+ get:
+ priority:
+ - $attribute:ClusterName
```
-| Parameter | Type | Default value | Description |
-|-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------|
-| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
-| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
-| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
+| Parameter | Type | Default value | Description |
+|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------|
+| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
+| `get.priority` | `[]string` | | List of node metrics used for prioritization when forming the response to `GET` requests. |
+
+
+# `rpc` section
+Contains limits on the number of active RPCs for the specified method(s).
+
+```yaml
+rpc:
+ limits:
+ - methods:
+ - /neo.fs.v2.object.ObjectService/PutSingle
+ - /neo.fs.v2.object.ObjectService/Put
+ max_ops: 1000
+ - methods:
+ - /neo.fs.v2.object.ObjectService/Get
+ max_ops: 10000
+```
+
+| Parameter | Type | Default value | Description |
+|------------------|------------|---------------|--------------------------------------------------------------|
+| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s). |
+| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit. |
# `runtime` section
Contains runtime parameters.
@@ -435,6 +551,52 @@ audit:
enabled: true
```
-| Parameter | Type | Default value | Description |
-|---------------------|--------|---------------|---------------------------------------------------|
-| `soft_memory_limit` | `bool` | false | If `true` then audit event logs will be recorded. |
+| Parameter | Type | Default value | Description |
+|-----------|--------|---------------|---------------------------------------------------|
+| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. |
+
+
+# `multinet` section
+Contains multinet parameters.
+
+```yaml
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
+```
+
+| Parameter | Type | Default value | Description |
+| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | false | If `true` then source-based routing is enabled. |
+| `subnets` | `subnet` | empty | List of subnets and the source IP addresses used to connect to them. |
+| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
+| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
+| `fallback_delay` | `duration` | 350ms | Delay before falling back to secondary IP addresses during hostname resolution. |
+
+# `qos` section
+```yaml
+qos:
+ critical:
+ authorized_keys:
+ - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
+ - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
+ internal:
+ authorized_keys:
+ - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
+ - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
+```
+| Parameter | Type | Default value | Description |
+| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- |
+| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
+| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |
diff --git a/docs/update-go-instruction.md b/docs/update-go-instruction.md
index f99225046..195e0c6b3 100644
--- a/docs/update-go-instruction.md
+++ b/docs/update-go-instruction.md
@@ -7,7 +7,7 @@
## Update CI
Change Golang versions for unit test in CI.
-There is `go` section in `.github/workflows/go.yaml` file:
+There is a `go` section in the `.forgejo/workflows/*.yml` files:
```yaml
jobs:
test:
diff --git a/go.mod b/go.mod
index 621d2e85d..6f1950936 100644
--- a/go.mod
+++ b/go.mod
@@ -1,23 +1,25 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
-go 1.22
+go 1.23.0
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
- git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
- git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
+ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
+ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
+ git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa
git.frostfs.info/TrueCloudLab/hrw v1.2.1
- git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
+ git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
+ git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/VictoriaMetrics/easyproto v0.1.4
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+ github.com/felixge/fgprof v0.9.5
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
github.com/gdamore/tcell/v2 v2.7.4
github.com/go-pkgz/expirable-cache/v3 v3.0.0
@@ -25,10 +27,9 @@ require (
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/klauspost/compress v1.17.4
github.com/mailru/easyjson v0.7.7
- github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
- github.com/multiformats/go-multiaddr v0.12.1
- github.com/nspcc-dev/neo-go v0.106.2
+ github.com/multiformats/go-multiaddr v0.15.0
+ github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.0
@@ -40,15 +41,14 @@ require (
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
- go.opentelemetry.io/otel v1.28.0
- go.opentelemetry.io/otel/trace v1.28.0
+ go.opentelemetry.io/otel v1.31.0
+ go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/zap v1.27.0
- golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
- golang.org/x/sync v0.7.0
- golang.org/x/sys v0.22.0
- golang.org/x/term v0.21.0
- google.golang.org/grpc v1.66.2
- google.golang.org/protobuf v1.34.2
+ golang.org/x/sync v0.12.0
+ golang.org/x/sys v0.31.0
+ golang.org/x/term v0.30.0
+ google.golang.org/grpc v1.69.2
+ google.golang.org/protobuf v1.36.1
gopkg.in/yaml.v3 v3.0.1
)
@@ -60,7 +60,7 @@ require (
require (
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -76,6 +76,7 @@ require (
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
+ github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
@@ -84,9 +85,9 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/ipfs/go-cid v0.4.1 // indirect
+ github.com/ipfs/go-cid v0.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/klauspost/reedsolomon v1.12.1 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -100,7 +101,7 @@ require (
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
- github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
+ github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -118,18 +119,19 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
- go.opentelemetry.io/otel/metric v1.28.0 // indirect
- go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.24.0 // indirect
- golang.org/x/net v0.26.0 // indirect
- golang.org/x/text v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+ golang.org/x/crypto v0.36.0 // indirect
+ golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
+ golang.org/x/net v0.30.0 // indirect
+ golang.org/x/text v0.23.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- lukechampine.com/blake3 v1.2.1 // indirect
+ lukechampine.com/blake3 v1.4.0 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
-replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928
+replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07
diff --git a/go.sum b/go.sum
index 4d21d9bca..5b075f60a 100644
--- a/go.sum
+++ b/go.sum
@@ -1,23 +1,25 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
-git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
-git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
@@ -27,8 +29,8 @@ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
-github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
-github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
+github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
@@ -39,6 +41,9 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
+github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -66,6 +71,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
+github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
@@ -89,6 +96,9 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw=
github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -98,6 +108,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -107,6 +119,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -128,16 +142,17 @@ github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
-github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
-github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
-github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q=
github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -146,6 +161,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
@@ -165,8 +181,6 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
@@ -178,8 +192,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk=
-github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE=
+github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo=
+github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
@@ -188,8 +202,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
@@ -208,6 +222,7 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo=
github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
@@ -256,6 +271,7 @@ github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4J
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -278,20 +294,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -306,15 +324,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -327,16 +345,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -359,19 +377,20 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -379,26 +398,26 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
-golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
-google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
-google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -407,8 +426,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -426,7 +445,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
-lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
+lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/internal/assert/cond.go b/internal/assert/cond.go
new file mode 100644
index 000000000..113d2eba9
--- /dev/null
+++ b/internal/assert/cond.go
@@ -0,0 +1,29 @@
+package assert
+
+import (
+ "fmt"
+ "strings"
+)
+
+func True(cond bool, details ...string) {
+ if !cond {
+ panic(strings.Join(details, " "))
+ }
+}
+
+func False(cond bool, details ...string) {
+ if cond {
+ panic(strings.Join(details, " "))
+ }
+}
+
+func NoError(err error, details ...string) {
+ if err != nil {
+ content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
+ panic(content)
+ }
+}
+
+func Fail(details ...string) {
+ panic(strings.Join(details, " "))
+}
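For illustration, a minimal sketch of how these assert helpers might be used by calling code; the function, buffer, and message below are hypothetical, not taken from the repository:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)

// take is a hypothetical helper: it panics via assert.True when an
// internal invariant (n within bounds) is violated. The details
// strings are joined with spaces into the panic message.
func take(buf []byte, n int) []byte {
	assert.True(n <= len(buf), "take:", "n exceeds buffer length")
	return buf[:n]
}

func main() {
	fmt.Println(string(take([]byte("hello"), 4))) // prints "hell"
}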
diff --git a/internal/audit/request.go b/internal/audit/request.go
index cf0797300..17666ab4b 100644
--- a/internal/audit/request.go
+++ b/internal/audit/request.go
@@ -1,10 +1,12 @@
package audit
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "context"
+
crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
@@ -17,15 +19,15 @@ type Target interface {
String() string
}
-func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) {
+func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
var key []byte
if req != nil {
key = req.GetVerificationHeader().GetBodySignature().GetKey()
}
- LogRequestWithKey(log, operation, key, target, status)
+ LogRequestWithKey(ctx, log, operation, key, target, status)
}
-func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) {
+func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
object, subject := NotDefined, NotDefined
publicKey := crypto.UnmarshalPublicKey(key)
@@ -37,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target
object = target.String()
}
- log.Info(logs.AuditEventLogRecord,
+ log.Info(ctx, logs.AuditEventLogRecord,
zap.String("operation", operation),
zap.String("object", object),
zap.String("subject", subject),
diff --git a/internal/audit/target.go b/internal/audit/target.go
index 8bc87ee8e..2d6881e29 100644
--- a/internal/audit/target.go
+++ b/internal/audit/target.go
@@ -3,7 +3,7 @@ package audit
import (
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 7aef6873e..626372f43 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -14,13 +14,9 @@ const (
InterruptPlacementIterationByContext = "interrupt placement iteration by context"
Notification = "notification"
-
- SkipDeprecatedNotification = "skip deprecated notification"
)
const (
- InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations"
- InnerringCantStopEpochEstimation = "can't stop epoch estimation"
InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
@@ -41,8 +37,6 @@ const (
InnerringCantUpdatePersistentState = "can't update persistent state"
InnerringCloserError = "closer error"
InnerringReadConfigFromBlockchain = "read config from blockchain"
- NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
- NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
PolicerCouldNotGetContainer = "could not get container"
PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
@@ -61,7 +55,6 @@ const (
ReplicatorCouldNotReplicateObject = "could not replicate object"
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
TreeRedirectingTreeServiceQuery = "redirecting tree service query"
- TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
TreeSynchronizeTree = "synchronize tree"
@@ -107,7 +100,6 @@ const (
GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed"
GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
- GetUnableToGetECObjectContainer = "unable to get container for erasure-coded object"
GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
@@ -133,7 +125,6 @@ const (
SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
SearchLocalOperationFailed = "local operation failed"
UtilObjectServiceError = "object service error"
- UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
@@ -148,14 +139,12 @@ const (
ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked"
ClientNotaryRequestInvoked = "notary request invoked"
ClientNotaryDepositTransactionWasSuccessfullyPersisted = "notary deposit transaction was successfully persisted"
- ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted = "attempt to wait for notary deposit transaction to get persisted"
ClientNeoClientInvoke = "neo client invoke"
ClientNativeGasTransferInvoke = "native gas transfer invoke"
ClientBatchGasTransferInvoke = "batch gas transfer invoke"
ClientCantGetBlockchainHeight = "can't get blockchain height"
ClientCantGetBlockchainHeight243 = "can't get blockchain height"
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
- EventCouldNotStartListenToEvents = "could not start listen to events"
EventStopEventListenerByError = "stop event listener by error"
EventStopEventListenerByContext = "stop event listener by context"
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
@@ -173,17 +162,9 @@ const (
EventNotaryParserNotSet = "notary parser not set"
EventCouldNotParseNotaryEvent = "could not parse notary event"
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
- EventIgnoreNilEventParser = "ignore nil event parser"
- EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
EventRegisteredNewEventParser = "registered new event parser"
- EventIgnoreNilEventHandler = "ignore nil event handler"
- EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
EventRegisteredNewEventHandler = "registered new event handler"
- EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
- EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
- EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
- EventIgnoreNilBlockHandler = "ignore nil block handler"
StorageOperation = "local object storage operation"
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
BlobovniczaOpeningBoltDB = "opening BoltDB"
@@ -217,6 +198,7 @@ const (
EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks"
EngineInterruptGettingLockers = "can't get object's lockers"
EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks"
+ EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones"
EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only"
EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode"
EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold"
@@ -226,12 +208,6 @@ const (
EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
- MetabaseMissingMatcher = "missing matcher"
- MetabaseErrorInFKBTSelection = "error in FKBT selection"
- MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf"
- MetabaseUnknownOperation = "unknown operation"
- MetabaseCantIterateOverTheBucket = "can't iterate over the bucket"
- MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
MetabaseCheckingMetabaseVersion = "checking metabase version"
@@ -249,6 +225,7 @@ const (
ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
ShardCouldNotUnmarshalObject = "could not unmarshal object"
+ ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
ShardCouldNotCloseShardComponent = "could not close shard component"
ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
@@ -276,9 +253,8 @@ const (
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
- WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
+ ShardCouldNotFindObject = "could not find object"
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
- WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
@@ -313,9 +289,6 @@ const (
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
ContainerDeleteContainerCheckFailed = "delete container check failed"
ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
- ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
- ContainerSetEACLCheckFailed = "set EACL check failed"
- ContainerCouldNotApproveSetEACL = "could not approve set EACL"
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
FrostFSCantRelaySetConfigEvent = "can't relay set config event"
FrostFSFrostfsWorkerPool = "frostfs worker pool"
@@ -360,7 +333,6 @@ const (
NetmapCantGetTransactionHeight = "can't get transaction height"
NetmapCantResetEpochTimer = "can't reset epoch timer"
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
- NetmapCantStartContainerSizeEstimation = "can't start container size estimation"
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
NetmapNextEpoch = "next epoch"
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
@@ -412,7 +384,6 @@ const (
FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
FrostFSNodeConfigurationReading = "configuration reading"
- FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
@@ -421,11 +392,6 @@ const (
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
- FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
- FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
- FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
- FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
- FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
FrostFSNodeFailedInitTracing = "failed init tracing"
@@ -469,7 +435,6 @@ const (
FSTreeCantUnmarshalObject = "can't unmarshal an object"
FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
FSTreeCantUpdateID = "can't update object storage ID"
- FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
PutSingleRedirectFailure = "failed to redirect PutSingle request"
StorageIDRetrievalFailure = "can't get storage ID from metabase"
ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
@@ -545,4 +510,12 @@ const (
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
WritecacheCantGetObject = "can't get an object from fstree"
+ FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
+ FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
+ NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
+ FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
+ FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
+ WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
+ FailedToUpdateNetmapCandidates = "update netmap candidates failed"
+ UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used"
)
diff --git a/internal/metrics/application.go b/internal/metrics/application.go
index 8bc408ab6..53acf9b7f 100644
--- a/internal/metrics/application.go
+++ b/internal/metrics/application.go
@@ -12,8 +12,9 @@ type ApplicationInfo struct {
func NewApplicationInfo(version string) *ApplicationInfo {
appInfo := &ApplicationInfo{
versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
- Name: "app_info",
- Help: "General information about the application.",
+ Namespace: namespace,
+ Name: "app_info",
+ Help: "General information about the application.",
}, []string{"version"}),
}
appInfo.versionValue.With(prometheus.Labels{"version": version})
diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go
index 3aa51c0f0..9123541ff 100644
--- a/internal/metrics/consts.go
+++ b/internal/metrics/consts.go
@@ -22,6 +22,8 @@ const (
grpcServerSubsystem = "grpc_server"
policerSubsystem = "policer"
commonCacheSubsystem = "common_cache"
+ multinetSubsystem = "multinet"
+ qosSubsystem = "qos"
successLabel = "success"
shardIDLabel = "shard_id"
@@ -41,6 +43,8 @@ const (
endpointLabel = "endpoint"
hitLabel = "hit"
cacheLabel = "cache"
+ sourceIPLabel = "source_ip"
+ ioTagLabel = "io_tag"
readWriteMode = "READ_WRITE"
readOnlyMode = "READ_ONLY"
diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go
index f6b14a632..d0cb8131f 100644
--- a/internal/metrics/innerring.go
+++ b/internal/metrics/innerring.go
@@ -17,7 +17,9 @@ type InnerRingServiceMetrics struct {
eventDuration *prometheus.HistogramVec
morphCacheMetrics *morphCacheMetrics
logMetrics logger.LogMetrics
- appInfo *ApplicationInfo
+ multinet *multinetMetrics
+ //nolint:unused
+ appInfo *ApplicationInfo
}
// NewInnerRingMetrics returns new instance of metrics collectors for inner ring.
@@ -50,6 +52,7 @@ func NewInnerRingMetrics() *InnerRingServiceMetrics {
morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace),
appInfo: NewApplicationInfo(misc.Version),
logMetrics: logger.NewLogMetrics(innerRingNamespace),
+ multinet: newMultinetMetrics(innerRingNamespace),
}
}
@@ -77,3 +80,7 @@ func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics {
func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics {
return m.logMetrics
}
+
+func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics {
+ return m.multinet
+}
diff --git a/internal/metrics/multinet.go b/internal/metrics/multinet.go
new file mode 100644
index 000000000..6b1f99d46
--- /dev/null
+++ b/internal/metrics/multinet.go
@@ -0,0 +1,35 @@
+package metrics
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type multinetMetrics struct {
+ dials *prometheus.GaugeVec
+}
+
+type MultinetMetrics interface {
+ Dial(sourceIP string, success bool)
+}
+
+func newMultinetMetrics(ns string) *multinetMetrics {
+ return &multinetMetrics{
+ dials: metrics.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: ns,
+ Subsystem: multinetSubsystem,
+ Name: "dial_count",
+ Help: "Dials count performed by multinet",
+ }, []string{sourceIPLabel, successLabel}),
+ }
+}
+
+func (m *multinetMetrics) Dial(sourceIP string, success bool) {
+ m.dials.With(prometheus.Labels{
+ sourceIPLabel: sourceIP,
+ successLabel: strconv.FormatBool(success),
+ }).Inc()
+}
diff --git a/internal/metrics/node.go b/internal/metrics/node.go
index d9e401446..8ade19eb2 100644
--- a/internal/metrics/node.go
+++ b/internal/metrics/node.go
@@ -25,7 +25,10 @@ type NodeMetrics struct {
morphClient *morphClientMetrics
morphCache *morphCacheMetrics
log logger.LogMetrics
- appInfo *ApplicationInfo
+ multinet *multinetMetrics
+ qos *QoSMetrics
+ //nolint:unused
+ appInfo *ApplicationInfo
}
func NewNodeMetrics() *NodeMetrics {
@@ -52,6 +55,8 @@ func NewNodeMetrics() *NodeMetrics {
morphCache: newMorphCacheMetrics(namespace),
log: logger.NewLogMetrics(namespace),
appInfo: NewApplicationInfo(misc.Version),
+ multinet: newMultinetMetrics(namespace),
+ qos: newQoSMetrics(),
}
}
@@ -119,3 +124,11 @@ func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics {
func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
return m.log
}
+
+func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
+ return m.multinet
+}
+
+func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
+ return m.qos
+}
diff --git a/internal/metrics/object.go b/internal/metrics/object.go
index 0ba994ed3..e4f6dfde1 100644
--- a/internal/metrics/object.go
+++ b/internal/metrics/object.go
@@ -9,13 +9,14 @@ import (
)
type ObjectServiceMetrics interface {
- AddRequestDuration(method string, d time.Duration, success bool)
+ AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
AddPayloadSize(method string, size int)
}
type objectServiceMetrics struct {
- methodDuration *prometheus.HistogramVec
- payloadCounter *prometheus.CounterVec
+ methodDuration *prometheus.HistogramVec
+ payloadCounter *prometheus.CounterVec
+ ioTagOpsCounter *prometheus.CounterVec
}
func newObjectServiceMetrics() *objectServiceMetrics {
@@ -32,14 +33,24 @@ func newObjectServiceMetrics() *objectServiceMetrics {
Name: "request_payload_bytes",
Help: "Object Service request payload",
}, []string{methodLabel}),
+ ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: objectSubsystem,
+ Name: "requests_total",
+ Help: "Count of requests for each IO tag",
+ }, []string{methodLabel, ioTagLabel}),
}
}
-func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
+func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
m.methodDuration.With(prometheus.Labels{
methodLabel: method,
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
+ m.ioTagOpsCounter.With(prometheus.Labels{
+ ioTagLabel: ioTag,
+ methodLabel: method,
+ }).Inc()
}
func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {
diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go
new file mode 100644
index 000000000..be6878142
--- /dev/null
+++ b/internal/metrics/qos.go
@@ -0,0 +1,52 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type QoSMetrics struct {
+ opsCounter *prometheus.GaugeVec
+}
+
+func newQoSMetrics() *QoSMetrics {
+ return &QoSMetrics{
+ opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: qosSubsystem,
+ Name: "operations_total",
+ Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
+ }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
+ }
+}
+
+func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "pending",
+ }).Set(float64(pending))
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "in_progress",
+ }).Set(float64(inProgress))
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "completed",
+ }).Set(float64(completed))
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "resource_exhausted",
+ }).Set(float64(resourceExhausted))
+}
+
+func (m *QoSMetrics) Close(shardID string) {
+ m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+}
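A sketch of how a shard could publish its queue state through these gauges; the shard ID, operation name, tag, and counts below are made up:

package example

import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"

func reportQoS() {
	nm := metrics.NewNodeMetrics()
	qos := nm.QoSMetrics()
	// One gauge per (shard, operation, tag, state) combination.
	qos.SetOperationTagCounters("shard-1", "get", "client", 3, 1, 120, 0)
	// Closing a shard drops every series labeled with its ID.
	qos.Close("shard-1")
}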
diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go
index 6702aa83c..e192c4398 100644
--- a/internal/metrics/treeservice.go
+++ b/internal/metrics/treeservice.go
@@ -12,12 +12,14 @@ type TreeMetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
+ AddOperation(string, string)
}
type treeServiceMetrics struct {
replicateTaskDuration *prometheus.HistogramVec
replicateWaitDuration *prometheus.HistogramVec
syncOpDuration *prometheus.HistogramVec
+ ioTagOpsCounter *prometheus.CounterVec
}
var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
@@ -42,6 +44,12 @@ func newTreeServiceMetrics() *treeServiceMetrics {
Name: "sync_duration_seconds",
Help: "Duration of synchronization operations",
}, []string{successLabel}),
+ ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: treeServiceSubsystem,
+ Name: "requests_total",
+ Help: "Count of requests for each IO tag",
+ }, []string{methodLabel, ioTagLabel}),
}
}
@@ -62,3 +70,10 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
}
+
+func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
+ m.ioTagOpsCounter.With(prometheus.Labels{
+ ioTagLabel: ioTag,
+ methodLabel: op,
+ }).Inc()
+}
diff --git a/internal/net/config.go b/internal/net/config.go
new file mode 100644
index 000000000..b84ac3b35
--- /dev/null
+++ b/internal/net/config.go
@@ -0,0 +1,69 @@
+package net
+
+import (
+ "errors"
+ "fmt"
+ "net/netip"
+ "slices"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var errEmptySourceIPList = errors.New("empty source IP list")
+
+type Subnet struct {
+ Prefix string
+ SourceIPs []string
+}
+
+type Config struct {
+ Enabled bool
+ Subnets []Subnet
+ Balancer string
+ Restrict bool
+ FallbackDelay time.Duration
+ Metrics metrics.MultinetMetrics
+}
+
+func (c Config) toMultinetConfig() (multinet.Config, error) {
+ var subnets []multinet.Subnet
+ for _, s := range c.Subnets {
+ var ms multinet.Subnet
+ p, err := netip.ParsePrefix(s.Prefix)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
+ }
+ ms.Prefix = p
+ for _, ip := range s.SourceIPs {
+ addr, err := netip.ParseAddr(ip)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
+ }
+ ms.SourceIPs = append(ms.SourceIPs, addr)
+ }
+ if len(ms.SourceIPs) == 0 {
+ return multinet.Config{}, errEmptySourceIPList
+ }
+ subnets = append(subnets, ms)
+ }
+ return multinet.Config{
+ Subnets: subnets,
+ Balancer: multinet.BalancerType(c.Balancer),
+ Restrict: c.Restrict,
+ FallbackDelay: c.FallbackDelay,
+ Dialer: newDefaultDialer(),
+ EventHandler: newEventHandler(c.Metrics),
+ }, nil
+}
+
+func (c Config) equals(other Config) bool {
+ return c.Enabled == other.Enabled &&
+ slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
+ return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
+ }) &&
+ c.Balancer == other.Balancer &&
+ c.Restrict == other.Restrict &&
+ c.FallbackDelay == other.FallbackDelay
+}
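For reference, a plausible Config literal accepted by toMultinetConfig; the prefix, source IP, and balancer name are assumptions (the balancer string must be a value that multinet recognizes):

package example

import (
	"time"

	internalnet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
)

func exampleConfig() internalnet.Config {
	return internalnet.Config{
		Enabled: true,
		Subnets: []internalnet.Subnet{
			// Every subnet must list at least one source IP,
			// otherwise toMultinetConfig fails with errEmptySourceIPList.
			{Prefix: "10.0.0.0/24", SourceIPs: []string{"10.0.0.5"}},
		},
		Balancer:      "roundrobin", // assumption: a multinet.BalancerType value
		FallbackDelay: 350 * time.Millisecond,
	}
}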
diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go
new file mode 100644
index 000000000..6265f1860
--- /dev/null
+++ b/internal/net/dial_target.go
@@ -0,0 +1,54 @@
+// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
+
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package net
+
+import (
+ "net/url"
+ "strings"
+)
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+ net := "tcp"
+ m1 := strings.Index(target, ":")
+ m2 := strings.Index(target, ":/")
+ // handle unix:addr which will fail with url.Parse
+ if m1 >= 0 && m2 < 0 {
+ if n := target[0:m1]; n == "unix" {
+ return n, target[m1+1:]
+ }
+ }
+ if m2 >= 0 {
+ t, err := url.Parse(target)
+ if err != nil {
+ return net, target
+ }
+ scheme := t.Scheme
+ addr := t.Path
+ if scheme == "unix" {
+ if addr == "" {
+ addr = t.Host
+ }
+ return scheme, addr
+ }
+ }
+ return net, target
+}
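Expected results of parseDialTarget, written as a test sketch that would live inside package net; the cases are inferred directly from the branches above:

package net

import "testing"

func TestParseDialTargetSketch(t *testing.T) {
	for _, tc := range []struct{ in, network, addr string }{
		{"unix:/tmp/sock", "unix", "/tmp/sock"},   // url.Parse path form
		{"unix:///tmp/sock", "unix", "/tmp/sock"}, // authority form
		{"unix:relative", "unix", "relative"},     // unix:addr shortcut
		{"10.0.0.1:8080", "tcp", "10.0.0.1:8080"}, // default network
	} {
		network, addr := parseDialTarget(tc.in)
		if network != tc.network || addr != tc.addr {
			t.Errorf("parseDialTarget(%q) = %q, %q; want %q, %q",
				tc.in, network, addr, tc.network, tc.addr)
		}
	}
}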
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
new file mode 100644
index 000000000..daf0f815f
--- /dev/null
+++ b/internal/net/dialer.go
@@ -0,0 +1,39 @@
+package net
+
+import (
+ "context"
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type Dialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
+ return d.DialContext(ctx, "tcp", address)
+}
+
+func newDefaultDialer() net.Dialer {
+ // From `grpc.WithContextDialer` comment:
+ //
+ // Note: All supported releases of Go (as of December 2023) override the OS
+ // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+ // with OS defaults for keepalive time and interval, use a net.Dialer that sets
+ // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+ // option to true from the Control field. For a concrete example of how to do
+ // this, see internal.NetDialerWithTCPKeepalive().
+ //
+ // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
+ return net.Dialer{
+ KeepAlive: time.Duration(-1),
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go
new file mode 100644
index 000000000..3d94dedc7
--- /dev/null
+++ b/internal/net/dialer_source.go
@@ -0,0 +1,83 @@
+package net
+
+import (
+ "context"
+ "net"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+type DialerSource struct {
+ guard sync.RWMutex
+
+ c Config
+
+ md multinet.Dialer
+}
+
+func NewDialerSource(c Config) (*DialerSource, error) {
+ result := &DialerSource{}
+ if err := result.build(c); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func (s *DialerSource) build(c Config) error {
+ if c.Enabled {
+ mc, err := c.toMultinetConfig()
+ if err != nil {
+ return err
+ }
+ md, err := multinet.NewDialer(mc)
+ if err != nil {
+ return err
+ }
+ s.md = md
+ s.c = c
+ return nil
+ }
+ s.md = nil
+ s.c = c
+ return nil
+}
+
+// GrpcContextDialer returns grpc.WithContextDialer func.
+// Returns nil if multinet disabled.
+func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+
+ if s.c.Enabled {
+ return func(ctx context.Context, address string) (net.Conn, error) {
+ network, address := parseDialTarget(address)
+ return s.md.DialContext(ctx, network, address)
+ }
+ }
+ return nil
+}
+
+// NetContextDialer returns net.DialContext dial function.
+// Returns nil if multinet disabled.
+func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+
+ if s.c.Enabled {
+ return func(ctx context.Context, network, address string) (net.Conn, error) {
+ return s.md.DialContext(ctx, network, address)
+ }
+ }
+ return nil
+}
+
+func (s *DialerSource) Update(c Config) error {
+ s.guard.Lock()
+ defer s.guard.Unlock()
+
+ if s.c.equals(c) {
+ return nil
+ }
+ return s.build(c)
+}
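A sketch of plugging the source into a gRPC client connection; the endpoint is illustrative, and grpc.NewClient is available in the grpc-go version pinned in go.sum above (v1.69.x):

package example

import (
	internalnet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialWithMultinet(source *internalnet.DialerSource, endpoint string) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// When multinet is disabled, GrpcContextDialer returns nil;
	// omitting the option falls back to the default gRPC dialer.
	if dialer := source.GrpcContextDialer(); dialer != nil {
		opts = append(opts, grpc.WithContextDialer(dialer))
	}
	return grpc.NewClient(endpoint, opts...)
}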
diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go
new file mode 100644
index 000000000..024e5cf7c
--- /dev/null
+++ b/internal/net/event_handler.go
@@ -0,0 +1,29 @@
+package net
+
+import (
+ "net"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var _ multinet.EventHandler = (*metricsEventHandler)(nil)
+
+type metricsEventHandler struct {
+ m metrics.MultinetMetrics
+}
+
+func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) {
+ sourceIPString := "undefined"
+ if sourceIP != nil {
+ sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
+ }
+ m.m.Dial(sourceIPString, err == nil)
+}
+
+func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler {
+ if m == nil {
+ return nil
+ }
+ return &metricsEventHandler{m: m}
+}
diff --git a/internal/qos/config.go b/internal/qos/config.go
new file mode 100644
index 000000000..d90b403b5
--- /dev/null
+++ b/internal/qos/config.go
@@ -0,0 +1,31 @@
+package qos
+
+import (
+ "math"
+ "time"
+)
+
+const (
+ NoLimit int64 = math.MaxInt64
+ DefaultIdleTimeout = 5 * time.Minute
+)
+
+type LimiterConfig struct {
+ Read OpConfig
+ Write OpConfig
+}
+
+type OpConfig struct {
+ MaxWaitingOps int64
+ MaxRunningOps int64
+ IdleTimeout time.Duration
+ Tags []IOTagConfig
+}
+
+type IOTagConfig struct {
+ Tag string
+ Weight *float64
+ LimitOps *float64
+ ReservedOps *float64
+ Prohibited bool
+}
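A plausible LimiterConfig literal built from these types; all numbers and tag names are illustrative, and the reading of nil pointer fields as "unset" is an assumption about the scheduler's contract:

package example

import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"

func exampleLimiterConfig() qos.LimiterConfig {
	weight := 10.0
	limit := 1000.0
	return qos.LimiterConfig{
		Read: qos.OpConfig{
			MaxWaitingOps: qos.NoLimit,
			MaxRunningOps: 128,
			IdleTimeout:   qos.DefaultIdleTimeout,
			Tags: []qos.IOTagConfig{
				// Pointer fields distinguish "unset" from an explicit zero.
				{Tag: "client", Weight: &weight, LimitOps: &limit},
				{Tag: "background", Prohibited: true},
			},
		},
	}
}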
diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go
new file mode 100644
index 000000000..58cd9e52c
--- /dev/null
+++ b/internal/qos/grpc.go
@@ -0,0 +1,86 @@
+package qos
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "google.golang.org/grpc"
+)
+
+func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
+ ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String())
+ return handler(ctx, req)
+ }
+}
+
+func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor {
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ rawTag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+ tag, err := FromRawString(rawTag)
+ if err != nil {
+ tag = IOTagClient
+ }
+ if tag.IsLocal() {
+ tag = IOTagInternal
+ }
+ ctx = tagging.ContextWithIOTag(ctx, tag.String())
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+}
+
+func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ rawTag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return streamer(ctx, desc, cc, method, opts...)
+ }
+ tag, err := FromRawString(rawTag)
+ if err != nil {
+ tag = IOTagClient
+ }
+ if tag.IsLocal() {
+ tag = IOTagInternal
+ }
+ ctx = tagging.ContextWithIOTag(ctx, tag.String())
+ return streamer(ctx, desc, cc, method, opts...)
+ }
+}
+
+func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
+ if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() {
+ return handler(ctx, req)
+ }
+
+ release, ok := getLimiter().Acquire(info.FullMethod)
+ if !ok {
+ return nil, new(apistatus.ResourceExhausted)
+ }
+ defer release()
+
+ return handler(ctx, req)
+ }
+}
+
+//nolint:contextcheck (grpc.ServerStream manages the context itself)
+func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor {
+ return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() {
+ return handler(srv, ss)
+ }
+
+ release, ok := getLimiter().Acquire(info.FullMethod)
+ if !ok {
+ return new(apistatus.ResourceExhausted)
+ }
+ defer release()
+
+ return handler(srv, ss)
+ }
+}
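
A sketch of wiring these interceptors; server and client construction details are assumptions for illustration, not part of the patch. The limiter interceptors guard incoming RPCs (critical-tagged requests bypass accounting), while the adjust interceptors rewrite IO tags on outgoing calls:

    package main

    import (
        "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
        "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func newLimitedServer(getLimiter func() limiting.Limiter) *grpc.Server {
        return grpc.NewServer(
            grpc.ChainUnaryInterceptor(qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter)),
            grpc.ChainStreamInterceptor(qos.NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter)),
        )
    }

    func newTaggedClient(target string) (*grpc.ClientConn, error) {
        // Outgoing calls keep their IO tag, except that local-only tags are
        // downgraded to "internal" and unknown ones fall back to "client".
        return grpc.NewClient(target,
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithChainUnaryInterceptor(qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()),
            grpc.WithChainStreamInterceptor(qos.NewAdjustOutgoingIOTagStreamClientInterceptor()),
        )
    }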
diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go
new file mode 100644
index 000000000..7d0826754
--- /dev/null
+++ b/internal/qos/grpc_test.go
@@ -0,0 +1,219 @@
+package qos_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+)
+
+const (
+ okKey = "ok"
+)
+
+var (
+ errTest = errors.New("mock")
+ errWrongTag = errors.New("wrong tag")
+ errNoTag = errors.New("failed to get tag from context")
+ errResExhausted *apistatus.ResourceExhausted
+ tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
+)
+
+type mockGRPCServerStream struct {
+ grpc.ServerStream
+
+ ctx context.Context
+}
+
+func (m *mockGRPCServerStream) Context() context.Context {
+ return m.ctx
+}
+
+type limiter struct {
+ acquired bool
+ released bool
+}
+
+func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
+ l.acquired = true
+ if key != okKey {
+ return nil, false
+ }
+ return func() { l.released = true }, true
+}
+
+func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
+ interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
+ handler := func(ctx context.Context, req any) (any, error) {
+ return nil, errTest
+ }
+ _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
+ return err
+}
+
+func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
+ interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
+ handler := func(srv any, stream grpc.ServerStream) error {
+ return errTest
+ }
+ err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
+ FullMethod: methodName,
+ }, handler)
+ return err
+}
+
+func Test_MaxActiveRPCLimiter(t *testing.T) {
+ // UnaryServerInterceptor
+ t.Run("unary fail", func(t *testing.T) {
+ var lim limiter
+
+ err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
+ require.ErrorAs(t, err, &errResExhausted)
+ require.True(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("unary pass critical", func(t *testing.T) {
+ var lim limiter
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+
+ err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
+ require.ErrorIs(t, err, errTest)
+ require.False(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("unary pass", func(t *testing.T) {
+ var lim limiter
+
+ err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
+ require.ErrorIs(t, err, errTest)
+ require.True(t, lim.acquired)
+ require.True(t, lim.released)
+ })
+ // StreamServerInterceptor
+ t.Run("stream fail", func(t *testing.T) {
+ var lim limiter
+
+ err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
+ require.ErrorAs(t, err, &errResExhausted)
+ require.True(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("stream pass critical", func(t *testing.T) {
+ var lim limiter
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+
+ err := streamMaxActiveRPCLimiter(ctx, &lim, "")
+ require.ErrorIs(t, err, errTest)
+ require.False(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("stream pass", func(t *testing.T) {
+ var lim limiter
+
+ err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
+ require.ErrorIs(t, err, errTest)
+ require.True(t, lim.acquired)
+ require.True(t, lim.released)
+ })
+}
+
+func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
+ interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
+ called := false
+ handler := func(ctx context.Context, req any) (any, error) {
+ called = true
+ if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
+ return nil, nil
+ }
+ return nil, errWrongTag
+ }
+ _, err := interceptor(context.Background(), nil, nil, handler)
+ require.NoError(t, err)
+ require.True(t, called)
+}
+
+func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
+ interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()
+
+ // check context with no value
+ called := false
+ invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
+ called = true
+ if _, ok := tagging.IOTagFromContext(ctx); ok {
+ return fmt.Errorf("%v: expected no IO tags", errWrongTag)
+ }
+ return nil
+ }
+ require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
+ require.True(t, called)
+
+ // check context for internal tag
+ targetTag := qos.IOTagInternal.String()
+ invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
+ raw, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return errNoTag
+ }
+ if raw != targetTag {
+ return errWrongTag
+ }
+ return nil
+ }
+ for _, tag := range tags {
+ ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
+ require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
+ }
+
+ // check context for client tag
+ ctx := tagging.ContextWithIOTag(context.Background(), "")
+ targetTag = qos.IOTagClient.String()
+ require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
+}
+
+func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
+ interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()
+
+ // check context with no value
+ called := false
+ streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ called = true
+ if _, ok := tagging.IOTagFromContext(ctx); ok {
+ return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
+ }
+ return nil, nil
+ }
+ _, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
+ require.True(t, called)
+ require.NoError(t, err)
+
+ // check context for internal tag
+ targetTag := qos.IOTagInternal.String()
+ streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ raw, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return nil, errNoTag
+ }
+ if raw != targetTag {
+ return nil, errWrongTag
+ }
+ return nil, nil
+ }
+ for _, tag := range tags {
+ ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
+ _, err := interceptor(ctx, nil, nil, "", streamer, nil)
+ require.NoError(t, err)
+ }
+
+ // check context for client tag
+ ctx := tagging.ContextWithIOTag(context.Background(), "")
+ targetTag = qos.IOTagClient.String()
+ _, err = interceptor(ctx, nil, nil, "", streamer, nil)
+ require.NoError(t, err)
+}
diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go
new file mode 100644
index 000000000..2d7de32fc
--- /dev/null
+++ b/internal/qos/limiter.go
@@ -0,0 +1,246 @@
+package qos
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+const (
+ defaultIdleTimeout time.Duration = 0
+ defaultShare float64 = 1.0
+ minusOne = ^uint64(0)
+
+ defaultMetricsCollectTimeout = 5 * time.Second
+)
+
+type ReleaseFunc scheduling.ReleaseFunc
+
+type Limiter interface {
+ ReadRequest(context.Context) (ReleaseFunc, error)
+ WriteRequest(context.Context) (ReleaseFunc, error)
+ SetParentID(string)
+ SetMetrics(Metrics)
+ Close()
+}
+
+type scheduler interface {
+ RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
+ Close()
+}
+
+func NewLimiter(c LimiterConfig) (Limiter, error) {
+ if err := c.Validate(); err != nil {
+ return nil, err
+ }
+ readScheduler, err := createScheduler(c.Read)
+ if err != nil {
+ return nil, fmt.Errorf("create read scheduler: %w", err)
+ }
+ writeScheduler, err := createScheduler(c.Write)
+ if err != nil {
+ return nil, fmt.Errorf("create write scheduler: %w", err)
+ }
+ l := &mClockLimiter{
+ readScheduler: readScheduler,
+ writeScheduler: writeScheduler,
+ closeCh: make(chan struct{}),
+ wg: &sync.WaitGroup{},
+ readStats: createStats(),
+ writeStats: createStats(),
+ }
+ l.shardID.Store(&shardID{})
+ l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
+ l.startMetricsCollect()
+ return l, nil
+}
+
+func createScheduler(config OpConfig) (scheduler, error) {
+ if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit {
+ return newSemaphoreScheduler(config.MaxRunningOps), nil
+ }
+ return scheduling.NewMClock(
+ uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
+ convertToSchedulingTags(config.Tags), config.IdleTimeout)
+}
+
+func convertToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo {
+ result := make(map[string]scheduling.TagInfo)
+ for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
+ result[tag.String()] = scheduling.TagInfo{
+ Share: defaultShare,
+ }
+ }
+ for _, l := range limits {
+ v := result[l.Tag]
+ if l.Weight != nil && *l.Weight != 0 {
+ v.Share = *l.Weight
+ }
+ if l.LimitOps != nil && *l.LimitOps != 0 {
+ v.LimitIOPS = l.LimitOps
+ }
+ if l.ReservedOps != nil && *l.ReservedOps != 0 {
+ v.ReservedIOPS = l.ReservedOps
+ }
+ v.Prohibited = l.Prohibited
+ result[l.Tag] = v
+ }
+ return result
+}
+
+var (
+ _ Limiter = (*noopLimiter)(nil)
+ releaseStub ReleaseFunc = func() {}
+ noopLimiterInstance = &noopLimiter{}
+)
+
+func NewNoopLimiter() Limiter {
+ return noopLimiterInstance
+}
+
+type noopLimiter struct{}
+
+func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
+ return releaseStub, nil
+}
+
+func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
+ return releaseStub, nil
+}
+
+func (n *noopLimiter) SetParentID(string) {}
+
+func (n *noopLimiter) Close() {}
+
+func (n *noopLimiter) SetMetrics(Metrics) {}
+
+var _ Limiter = (*mClockLimiter)(nil)
+
+type shardID struct {
+ id string
+}
+
+type mClockLimiter struct {
+ readScheduler scheduler
+ writeScheduler scheduler
+
+ readStats map[string]*stat
+ writeStats map[string]*stat
+
+ shardID atomic.Pointer[shardID]
+ metrics atomic.Pointer[metricsHolder]
+ closeCh chan struct{}
+ wg *sync.WaitGroup
+}
+
+func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
+ return requestArrival(ctx, n.readScheduler, n.readStats)
+}
+
+func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
+ return requestArrival(ctx, n.writeScheduler, n.writeStats)
+}
+
+func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ tag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ tag = IOTagClient.String()
+ }
+ stat := getStat(tag, stats)
+ stat.pending.Add(1)
+ if tag == IOTagCritical.String() {
+ stat.inProgress.Add(1)
+ return func() {
+ stat.completed.Add(1)
+ }, nil
+ }
+ rel, err := s.RequestArrival(ctx, tag)
+ stat.inProgress.Add(1)
+ if err != nil {
+ if isResourceExhaustedErr(err) {
+ stat.resourceExhausted.Add(1)
+ return nil, &apistatus.ResourceExhausted{}
+ }
+ stat.completed.Add(1)
+ return nil, err
+ }
+ return func() {
+ rel()
+ stat.completed.Add(1)
+ }, nil
+}
+
+func (n *mClockLimiter) Close() {
+ n.readScheduler.Close()
+ n.writeScheduler.Close()
+ close(n.closeCh)
+ n.wg.Wait()
+ n.metrics.Load().metrics.Close(n.shardID.Load().id)
+}
+
+func (n *mClockLimiter) SetParentID(parentID string) {
+ n.shardID.Store(&shardID{id: parentID})
+}
+
+func (n *mClockLimiter) SetMetrics(m Metrics) {
+ n.metrics.Store(&metricsHolder{metrics: m})
+}
+
+func (n *mClockLimiter) startMetricsCollect() {
+ n.wg.Add(1)
+ go func() {
+ defer n.wg.Done()
+
+ ticker := time.NewTicker(defaultMetricsCollectTimeout)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-n.closeCh:
+ return
+ case <-ticker.C:
+ shardID := n.shardID.Load().id
+ if shardID == "" {
+ continue
+ }
+ metrics := n.metrics.Load().metrics
+ exportMetrics(metrics, n.readStats, shardID, "read")
+ exportMetrics(metrics, n.writeStats, shardID, "write")
+ }
+ }
+ }()
+}
+
+func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
+ var pending uint64
+ var inProgress uint64
+ var completed uint64
+ var resExh uint64
+ for tag, s := range stats {
+ pending = s.pending.Load()
+ inProgress = s.inProgress.Load()
+ completed = s.completed.Load()
+ resExh = s.resourceExhausted.Load()
+ if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
+ continue
+ }
+ metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
+ }
+}
+
+func isResourceExhaustedErr(err error) bool {
+ return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
+ errors.Is(err, errSemaphoreLimitExceeded) ||
+ errors.Is(err, scheduling.ErrTagRequestsProhibited)
+}
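
The intended call pattern around a storage operation, as a sketch (the read callback is an illustrative stand-in): acquire before the operation, release after, and propagate the typed rejection to the client:

    package main

    import (
        "context"

        "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
    )

    // readWithLimit brackets a read with the limiter: acquire, run, release.
    // On rejection, err is *apistatus.ResourceExhausted and can be returned
    // to the client as-is.
    func readWithLimit(ctx context.Context, lim qos.Limiter, read func(context.Context) error) error {
        release, err := lim.ReadRequest(ctx)
        if err != nil {
            return err
        }
        defer release()
        return read(ctx)
    }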
diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go
new file mode 100644
index 000000000..c00da51b7
--- /dev/null
+++ b/internal/qos/metrics.go
@@ -0,0 +1,31 @@
+package qos
+
+import "sync/atomic"
+
+type Metrics interface {
+ SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
+ Close(shardID string)
+}
+
+var _ Metrics = (*noopMetrics)(nil)
+
+type noopMetrics struct{}
+
+func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
+}
+
+func (n *noopMetrics) Close(string) {}
+
+// stat represents the limiter's cumulative statistics counters.
+//
+// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
+type stat struct {
+ completed atomic.Uint64
+ pending atomic.Uint64
+ resourceExhausted atomic.Uint64
+ inProgress atomic.Uint64
+}
+
+type metricsHolder struct {
+ metrics Metrics
+}
diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go
new file mode 100644
index 000000000..74e6928f3
--- /dev/null
+++ b/internal/qos/semaphore.go
@@ -0,0 +1,39 @@
+package qos
+
+import (
+ "context"
+ "errors"
+
+ qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
+)
+
+var (
+ _ scheduler = (*semaphore)(nil)
+ errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded")
+)
+
+type semaphore struct {
+ s *qosSemaphore.Semaphore
+}
+
+func newSemaphoreScheduler(size int64) *semaphore {
+ return &semaphore{
+ s: qosSemaphore.NewSemaphore(size),
+ }
+}
+
+func (s *semaphore) Close() {}
+
+func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ if s.s.Acquire() {
+ return s.s.Release, nil
+ }
+ return nil, errSemaphoreLimitExceeded
+}
diff --git a/internal/qos/stats.go b/internal/qos/stats.go
new file mode 100644
index 000000000..3ecfad9f9
--- /dev/null
+++ b/internal/qos/stats.go
@@ -0,0 +1,29 @@
+package qos
+
+const unknownStatsTag = "unknown"
+
+var statTags = map[string]struct{}{
+ IOTagBackground.String(): {},
+ IOTagClient.String(): {},
+ IOTagCritical.String(): {},
+ IOTagInternal.String(): {},
+ IOTagPolicer.String(): {},
+ IOTagTreeSync.String(): {},
+ IOTagWritecache.String(): {},
+ unknownStatsTag: {},
+}
+
+func createStats() map[string]*stat {
+ result := make(map[string]*stat)
+ for tag := range statTags {
+ result[tag] = &stat{}
+ }
+ return result
+}
+
+func getStat(tag string, stats map[string]*stat) *stat {
+ if v, ok := stats[tag]; ok {
+ return v
+ }
+ return stats[unknownStatsTag]
+}
diff --git a/internal/qos/tags.go b/internal/qos/tags.go
new file mode 100644
index 000000000..e3f7cafd6
--- /dev/null
+++ b/internal/qos/tags.go
@@ -0,0 +1,59 @@
+package qos
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+)
+
+type IOTag string
+
+const (
+ IOTagBackground IOTag = "background"
+ IOTagClient IOTag = "client"
+ IOTagCritical IOTag = "critical"
+ IOTagInternal IOTag = "internal"
+ IOTagPolicer IOTag = "policer"
+ IOTagTreeSync IOTag = "treesync"
+ IOTagWritecache IOTag = "writecache"
+
+ ioTagUnknown IOTag = ""
+)
+
+func FromRawString(s string) (IOTag, error) {
+ switch s {
+ case string(IOTagBackground):
+ return IOTagBackground, nil
+ case string(IOTagClient):
+ return IOTagClient, nil
+ case string(IOTagCritical):
+ return IOTagCritical, nil
+ case string(IOTagInternal):
+ return IOTagInternal, nil
+ case string(IOTagPolicer):
+ return IOTagPolicer, nil
+ case string(IOTagTreeSync):
+ return IOTagTreeSync, nil
+ case string(IOTagWritecache):
+ return IOTagWritecache, nil
+ default:
+ return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
+ }
+}
+
+func (t IOTag) String() string {
+ return string(t)
+}
+
+func IOTagFromContext(ctx context.Context) string {
+ tag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ tag = "undefined"
+ }
+ return tag
+}
+
+func (t IOTag) IsLocal() bool {
+ return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
+}
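
To make the normalization rules concrete, a sketch with a hypothetical helper that mirrors the client interceptors above (it is not an API of this package):

    package main

    import (
        "fmt"

        "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
    )

    // normalizeOutgoingTag mirrors the adjust interceptors: unparsable tags
    // fall back to "client"; node-local tags are downgraded to "internal".
    func normalizeOutgoingTag(raw string) string {
        tag, err := qos.FromRawString(raw)
        if err != nil {
            tag = qos.IOTagClient
        }
        if tag.IsLocal() {
            tag = qos.IOTagInternal
        }
        return tag.String()
    }

    func main() {
        fmt.Println(normalizeOutgoingTag("policer"))  // internal
        fmt.Println(normalizeOutgoingTag("bogus"))    // client
        fmt.Println(normalizeOutgoingTag("critical")) // critical
    }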
diff --git a/internal/qos/validate.go b/internal/qos/validate.go
new file mode 100644
index 000000000..70f1f24e8
--- /dev/null
+++ b/internal/qos/validate.go
@@ -0,0 +1,91 @@
+package qos
+
+import (
+ "errors"
+ "fmt"
+ "math"
+)
+
+var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
+
+type tagConfig struct {
+ Shares, Limit, Reserved *float64
+}
+
+func (c *LimiterConfig) Validate() error {
+ if err := validateOpConfig(c.Read); err != nil {
+ return fmt.Errorf("limits 'read' section validation error: %w", err)
+ }
+ if err := validateOpConfig(c.Write); err != nil {
+ return fmt.Errorf("limits 'write' section validation error: %w", err)
+ }
+ return nil
+}
+
+func validateOpConfig(c OpConfig) error {
+ if c.MaxRunningOps <= 0 {
+ return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
+ }
+ if c.MaxWaitingOps <= 0 {
+ return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps)
+ }
+ if c.IdleTimeout <= 0 {
+ return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String())
+ }
+ if err := validateTags(c.Tags); err != nil {
+ return fmt.Errorf("'tags' config section validation error: %w", err)
+ }
+ return nil
+}
+
+func validateTags(configTags []IOTagConfig) error {
+ tags := map[IOTag]tagConfig{
+ IOTagBackground: {},
+ IOTagClient: {},
+ IOTagInternal: {},
+ IOTagPolicer: {},
+ IOTagTreeSync: {},
+ IOTagWritecache: {},
+ }
+ for _, t := range configTags {
+ tag, err := FromRawString(t.Tag)
+ if err != nil {
+ return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
+ }
+ if _, ok := tags[tag]; !ok {
+ return fmt.Errorf("tag %s is not configurable", t.Tag)
+ }
+ tags[tag] = tagConfig{
+ Shares: t.Weight,
+ Limit: t.LimitOps,
+ Reserved: t.ReservedOps,
+ }
+ }
+ idx := 0
+ var shares float64
+ for t, v := range tags {
+ if idx == 0 {
+ idx++
+ shares = float64Value(v.Shares)
+ } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
+ return errWeightsMustBeSpecified
+ }
+ if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
+ return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String())
+ }
+ if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
+ return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String())
+ }
+ if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
+ return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String())
+ }
+ }
+ return nil
+}
+
+func float64Value(f *float64) float64 {
+ if f == nil {
+ return 0.0
+ }
+ return *f
+}
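
To illustrate the all-or-none weights rule, a sketch of a config that fails validation (values invented): only one of the six configurable tags gets a weight, so the zero/non-zero mix is rejected:

    package main

    import (
        "fmt"

        "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
    )

    func main() {
        w := 2.0
        cfg := qos.LimiterConfig{
            Read: qos.OpConfig{
                MaxRunningOps: 10,
                MaxWaitingOps: 10,
                IdleTimeout:   qos.DefaultIdleTimeout,
                // A weight for a single tag only: the other five default
                // to unset (zero), so validation rejects the mix.
                Tags: []qos.IOTagConfig{{Tag: qos.IOTagClient.String(), Weight: &w}},
            },
            Write: qos.OpConfig{
                MaxRunningOps: 10,
                MaxWaitingOps: 10,
                IdleTimeout:   qos.DefaultIdleTimeout,
            },
        }
        fmt.Println(cfg.Validate()) // limits 'read' section validation error: ...
    }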
diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go
index e547701fb..590b7a885 100644
--- a/pkg/ape/chainbase/option.go
+++ b/pkg/ape/chainbase/option.go
@@ -5,9 +5,7 @@ import (
"os"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.etcd.io/bbolt"
- "go.uber.org/zap"
)
type Option func(*cfg)
@@ -18,7 +16,6 @@ type cfg struct {
noSync bool
maxBatchDelay time.Duration
maxBatchSize int
- log *logger.Logger
}
func defaultCfg() *cfg {
@@ -26,7 +23,6 @@ func defaultCfg() *cfg {
perm: os.ModePerm,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
maxBatchSize: bbolt.DefaultMaxBatchSize,
- log: &logger.Logger{Logger: zap.L()},
}
}
@@ -59,9 +55,3 @@ func WithMaxBatchSize(maxBatchSize int) Option {
c.maxBatchSize = maxBatchSize
}
}
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go
index 953b91a79..8cbb1cce9 100644
--- a/pkg/ape/contract_storage/proxy.go
+++ b/pkg/ape/contract_storage/proxy.go
@@ -31,9 +31,7 @@ type RPCActorProvider interface {
type ProxyVerificationContractStorage struct {
rpcActorProvider RPCActorProvider
- acc *wallet.Account
-
- proxyScriptHash util.Uint160
+ cosigners []actor.SignerAccount
policyScriptHash util.Uint160
}
@@ -41,12 +39,27 @@ type ProxyVerificationContractStorage struct {
var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
+ acc := wallet.NewAccountFromPrivateKey(key)
return &ProxyVerificationContractStorage{
rpcActorProvider: rpcActorProvider,
- acc: wallet.NewAccountFromPrivateKey(key),
-
- proxyScriptHash: proxyScriptHash,
+ cosigners: []actor.SignerAccount{
+ {
+ Signer: transaction.Signer{
+ Account: proxyScriptHash,
+ Scopes: transaction.CustomContracts,
+ AllowedContracts: []util.Uint160{policyScriptHash},
+ },
+ Account: notary.FakeContractAccount(proxyScriptHash),
+ },
+ {
+ Signer: transaction.Signer{
+ Account: acc.Contract.ScriptHash(),
+ Scopes: transaction.CalledByEntry,
+ },
+ Account: acc,
+ },
+ },
policyScriptHash: policyScriptHash,
}
@@ -64,7 +77,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
- act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash))
+ act, err := actor.New(rpcActor, contractStorage.cosigners)
if err != nil {
return nil, err
}
@@ -98,31 +111,16 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na
// ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as cosigners.
func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
- // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but
- // ProxyVerificationContractStorage does not manage reconnections.
- contractStorageActor, err := contractStorage.newContractStorageActor()
- if err != nil {
- return nil, err
- }
- return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
+ rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
+ inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
+ return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
}
-func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount {
- return []actor.SignerAccount{
- {
- Signer: transaction.Signer{
- Account: proxyScriptHash,
- Scopes: transaction.CustomContracts,
- AllowedContracts: []util.Uint160{policyScriptHash},
- },
- Account: notary.FakeContractAccount(proxyScriptHash),
- },
- {
- Signer: transaction.Signer{
- Account: acc.Contract.ScriptHash(),
- Scopes: transaction.CalledByEntry,
- },
- Account: acc,
- },
- }
+type invokerAdapter struct {
+ *invoker.Invoker
+ rpcInvoker invoker.RPCInvoke
+}
+
+func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
+ return n.rpcInvoker
}
diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go
index c0413678d..d32bd4a07 100644
--- a/pkg/ape/request/frostfsid.go
+++ b/pkg/ape/request/frostfsid.go
@@ -1,6 +1,7 @@
package request
import (
+ "context"
"fmt"
"strconv"
"strings"
@@ -12,9 +13,9 @@ import (
)
// FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID.
-func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
+func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
reqProps := make(map[string]string)
- subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
+ subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err)
@@ -36,8 +37,8 @@ func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvide
}
// Groups returns the actor's group IDs from the frostfsid contract.
-func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
- subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
+func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
+ subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err)
diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go
index 854fbc49f..98bdf99e7 100644
--- a/pkg/core/client/client.go
+++ b/pkg/core/client/client.go
@@ -3,8 +3,8 @@ package client
import (
"context"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go
index d4bc0cf68..91ee5c6c3 100644
--- a/pkg/core/client/util.go
+++ b/pkg/core/client/util.go
@@ -3,6 +3,7 @@ package client
import (
"bytes"
"fmt"
+ "iter"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -19,7 +20,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro
// Args must not be nil.
func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface {
PublicKey() []byte
- IterateAddresses(func(string) bool)
+ Addresses() iter.Seq[string]
NumberOfAddresses() int
ExternalAddresses() []string
},
diff --git a/pkg/core/container/delete.go b/pkg/core/container/delete.go
index 8e0aaebb9..8c14bdf5e 100644
--- a/pkg/core/container/delete.go
+++ b/pkg/core/container/delete.go
@@ -1,7 +1,7 @@
package container
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go
new file mode 100644
index 000000000..1c52d93e7
--- /dev/null
+++ b/pkg/core/container/info.go
@@ -0,0 +1,104 @@
+package container
+
+import (
+ "context"
+ "sync"
+
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+type Info struct {
+ Indexed bool
+ Removed bool
+}
+
+type infoValue struct {
+ info Info
+ err error
+}
+
+type InfoProvider interface {
+ Info(ctx context.Context, id cid.ID) (Info, error)
+}
+
+type infoProvider struct {
+ mtx *sync.RWMutex
+ cache map[cid.ID]infoValue
+ kl *utilSync.KeyLocker[cid.ID]
+
+ source Source
+ sourceErr error
+ sourceOnce *sync.Once
+ sourceFactory func() (Source, error)
+}
+
+func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
+ return &infoProvider{
+ mtx: &sync.RWMutex{},
+ cache: make(map[cid.ID]infoValue),
+ sourceOnce: &sync.Once{},
+ kl: utilSync.NewKeyLocker[cid.ID](),
+ sourceFactory: sourceFactory,
+ }
+}
+
+func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) {
+ v, found := r.tryGetFromCache(id)
+ if found {
+ return v.info, v.err
+ }
+
+ return r.getFromSource(ctx, id)
+}
+
+func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ value, found := r.cache[id]
+ return value, found
+}
+
+func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) {
+ r.kl.Lock(id)
+ defer r.kl.Unlock(id)
+
+ if v, ok := r.tryGetFromCache(id); ok {
+ return v.info, v.err
+ }
+
+ r.sourceOnce.Do(func() {
+ r.source, r.sourceErr = r.sourceFactory()
+ })
+ if r.sourceErr != nil {
+ return Info{}, r.sourceErr
+ }
+
+ cnr, err := r.source.Get(ctx, id)
+ var civ infoValue
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ removed, err := WasRemoved(ctx, r.source, id)
+ if err != nil {
+ civ.err = err
+ } else {
+ civ.info.Removed = removed
+ }
+ } else {
+ civ.err = err
+ }
+ } else {
+ civ.info.Indexed = IsIndexedContainer(cnr.Value)
+ }
+ r.putToCache(id, civ)
+ return civ.info, civ.err
+}
+
+func (r *infoProvider) putToCache(id cid.ID, ct infoValue) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ r.cache[id] = ct
+}
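
A sketch of consuming InfoProvider; buildSource is a hypothetical stand-in for morph client wiring and is not invoked until the first lookup. Note that results, including errors, are memoized per container ID:

    package main

    import (
        "context"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
        cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    )

    // newProvider defers source construction until the first Info call.
    func newProvider(buildSource func() (container.Source, error)) container.InfoProvider {
        return container.NewInfoProvider(buildSource)
    }

    // shouldIndex reports whether the container's objects should be indexed,
    // treating removed containers as non-indexable.
    func shouldIndex(ctx context.Context, p container.InfoProvider, id cid.ID) (bool, error) {
        info, err := p.Info(ctx, id)
        if err != nil {
            return false, err
        }
        return info.Indexed && !info.Removed, nil
    }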
diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go
index 69854f495..4eb14e53c 100644
--- a/pkg/core/container/storage.go
+++ b/pkg/core/container/storage.go
@@ -1,6 +1,8 @@
package container
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@@ -41,9 +43,9 @@ type Source interface {
//
// Implementations must not retain the container pointer and modify
// the container through it.
- Get(cid.ID) (*Container, error)
+ Get(ctx context.Context, cid cid.ID) (*Container, error)
- DeletionInfo(cid.ID) (*DelInfo, error)
+ DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error)
}
// EACL groups information about the FrostFS container's extended ACL stored in
@@ -58,16 +60,3 @@ type EACL struct {
// Session within which Value was set. Nil means session absence.
Session *session.Container
}
-
-// EACLSource is the interface that wraps
-// basic methods of extended ACL table source.
-type EACLSource interface {
- // GetEACL reads the table from the source by identifier.
- // It returns any error encountered.
- //
- // GetEACL must return exactly one non-nil value.
- //
- // Must return apistatus.ErrEACLNotFound if requested
- // eACL table is not in source.
- GetEACL(cid.ID) (*EACL, error)
-}
diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go
index 98919284e..61c568052 100644
--- a/pkg/core/container/util.go
+++ b/pkg/core/container/util.go
@@ -1,16 +1,18 @@
package container
import (
+ "context"
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// WasRemoved checks whether the container ever existed or
// it just has not been created yet at the current epoch.
-func WasRemoved(s Source, cid cid.ID) (bool, error) {
- _, err := s.DeletionInfo(cid)
+func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) {
+ _, err := s.DeletionInfo(ctx, cid)
if err == nil {
return true, nil
}
@@ -20,3 +22,14 @@ func WasRemoved(s Source, cid cid.ID) (bool, error) {
}
return false, err
}
+
+// IsIndexedContainer returns true if the container's attributes should be indexed.
+func IsIndexedContainer(cnr containerSDK.Container) bool {
+ var isS3Container bool
+ for key := range cnr.Attributes() {
+ if key == ".s3-location-constraint" {
+ isS3Container = true
+ }
+ }
+ return !isS3Container
+}
diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go
index ecfd0eb15..e752043d3 100644
--- a/pkg/core/frostfsid/subject_provider.go
+++ b/pkg/core/frostfsid/subject_provider.go
@@ -1,6 +1,8 @@
package frostfsid
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -11,6 +13,6 @@ const (
// SubjectProvider interface provides methods to get subject from FrostfsID contract.
type SubjectProvider interface {
- GetSubject(util.Uint160) (*client.Subject, error)
- GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error)
+ GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error)
+ GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error)
}
diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go
index 29cb2dc94..0c64bb798 100644
--- a/pkg/core/netmap/keys.go
+++ b/pkg/core/netmap/keys.go
@@ -2,6 +2,6 @@ package netmap
// AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes.
type AnnouncedKeys interface {
- // Checks if the key was announced by a local node.
+ // IsLocalKey checks if the key was announced by a local node.
IsLocalKey(key []byte) bool
}
diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go
index b0c9e1f9e..e58e42634 100644
--- a/pkg/core/netmap/nodes.go
+++ b/pkg/core/netmap/nodes.go
@@ -1,6 +1,10 @@
package netmap
-import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+import (
+ "iter"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
// Node is a named type of netmap.NodeInfo which provides interface needed
// in the current repository. Node is expected to be used everywhere instead
@@ -14,10 +18,20 @@ func (x Node) PublicKey() []byte {
return (netmap.NodeInfo)(x).PublicKey()
}
+// Addresses returns an iterator over all announced network addresses.
+func (x Node) Addresses() iter.Seq[string] {
+ return (netmap.NodeInfo)(x).NetworkEndpoints()
+}
+
// IterateAddresses iterates over all announced network addresses
// and passes them into f. Handler MUST NOT be nil.
+// Deprecated: use [Node.Addresses] instead.
func (x Node) IterateAddresses(f func(string) bool) {
- (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
+ for s := range (netmap.NodeInfo)(x).NetworkEndpoints() {
+ if f(s) {
+ return
+ }
+ }
}
// NumberOfAddresses returns number of announced network addresses.
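
With Addresses returning a Go 1.23 iter.Seq, callers range over it directly; a short sketch of the new form replacing the deprecated callback:

    package main

    import (
        "fmt"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
    )

    // printAddresses uses plain range instead of the IterateAddresses
    // callback; break (or return) replaces returning true from f.
    func printAddresses(n netmap.Node) {
        for addr := range n.Addresses() {
            fmt.Println(addr)
        }
    }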
diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go
index 7770c61c7..97313da84 100644
--- a/pkg/core/netmap/storage.go
+++ b/pkg/core/netmap/storage.go
@@ -1,6 +1,8 @@
package netmap
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -16,7 +18,7 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMap(diff uint64) (*netmap.NetMap, error)
+ GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error)
// GetNetMapByEpoch reads network map by the epoch number from the storage.
// It returns the pointer to the requested network map and any error encountered.
@@ -25,21 +27,21 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error)
+ GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error)
// Epoch reads the current epoch from the storage.
// It returns the number of the current epoch and any error encountered.
//
// Must return exactly one non-default value.
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
}
// GetLatestNetworkMap requests and returns the latest network map from the storage.
-func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(0)
+func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(ctx, 0)
}
// GetPreviousNetworkMap requests and returns the network map preceding the latest one from the storage.
-func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(1)
+func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(ctx, 1)
}
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index 96f721806..cf090eb37 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -8,11 +8,11 @@ import (
"fmt"
"strconv"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
}
if !unprepared {
- if err := v.validateSignatureKey(obj); err != nil {
+ if err := v.validateSignatureKey(ctx, obj); err != nil {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
@@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
return nil
}
-func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
+func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
return errMissingSignature
@@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
ownerID := obj.OwnerID()
if token == nil && obj.ECHeader() != nil {
- role, err := v.isIROrContainerNode(obj, binKey)
+ role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}
@@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
}
if v.verifyTokenIssuer {
- role, err := v.isIROrContainerNode(obj, binKey)
+ role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}
@@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
return nil
}
-func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
+func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
return acl.RoleOthers, errNilCID
@@ -199,12 +199,12 @@ func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey [
cnrIDBin := make([]byte, sha256.Size)
cnrID.Encode(cnrIDBin)
- cnr, err := v.containers.Get(cnrID)
+ cnr, err := v.containers.Get(ctx, cnrID)
if err != nil {
return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}
- res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
+ res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
if err != nil {
return acl.RoleOthers, err
}
@@ -361,7 +361,7 @@ func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Obj
func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
for _, a := range obj.Attributes() {
- if a.Key() != objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS {
+ if a.Key() != objectV2.SysAttributeExpEpoch {
continue
}
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go
index 77afbfc45..dc336eb34 100644
--- a/pkg/core/object/fmt_test.go
+++ b/pkg/core/object/fmt_test.go
@@ -7,9 +7,10 @@ import (
"strconv"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -65,7 +66,7 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
ownerKey, err := keys.NewPrivateKey()
@@ -290,7 +291,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(false),
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
tok := sessiontest.Object()
@@ -339,7 +340,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
},
),
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
tok := sessiontest.Object()
@@ -410,14 +411,14 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &testNetmapSource{
- netmaps: map[uint64]*netmap.NetMap{
+ &utilTesting.TestNetmapSource{
+ Netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
- currentEpoch: curEpoch,
+ CurrentEpoch: curEpoch,
},
),
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -483,15 +484,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &testNetmapSource{
- netmaps: map[uint64]*netmap.NetMap{
+ &utilTesting.TestNetmapSource{
+ Netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
- currentEpoch: curEpoch,
+ CurrentEpoch: curEpoch,
},
),
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -559,15 +560,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &testNetmapSource{
- netmaps: map[uint64]*netmap.NetMap{
+ &utilTesting.TestNetmapSource{
+ Netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
- currentEpoch: curEpoch,
+ CurrentEpoch: curEpoch,
},
),
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
require.Error(t, v.Validate(context.Background(), obj, false))
@@ -578,7 +579,7 @@ type testIRSource struct {
irNodes [][]byte
}
-func (s *testIRSource) InnerRingKeys() ([][]byte, error) {
+func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) {
return s.irNodes, nil
}
@@ -586,36 +587,13 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container
}
-func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
-func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
return nil, nil
}
-
-type testNetmapSource struct {
- netmaps map[uint64]*netmap.NetMap
- currentEpoch uint64
-}
-
-func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) {
- if diff >= s.currentEpoch {
- return nil, fmt.Errorf("invalid diff")
- }
- return s.GetNetMapByEpoch(s.currentEpoch - diff)
-}
-
-func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
- if nm, found := s.netmaps[epoch]; found {
- return nm, nil
- }
- return nil, fmt.Errorf("netmap not found")
-}
-
-func (s *testNetmapSource) Epoch() (uint64, error) {
- return s.currentEpoch, nil
-}
diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go
index 67c9a3188..aab12ebf9 100644
--- a/pkg/core/object/info.go
+++ b/pkg/core/object/info.go
@@ -13,6 +13,13 @@ type ECInfo struct {
Total uint32
}
+func (v *ECInfo) String() string {
+ if v == nil {
+ return ""
+ }
+ return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total)
+}
+
// Info groups object address with its FrostFS
// object info.
type Info struct {
@@ -23,5 +30,5 @@ type Info struct {
}
func (v Info) String() string {
- return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject)
+ return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo)
}
diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go
index 13d0ebfb1..3733ed507 100644
--- a/pkg/core/object/sender_classifier.go
+++ b/pkg/core/object/sender_classifier.go
@@ -2,6 +2,7 @@ package object
import (
"bytes"
+ "context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -17,7 +18,7 @@ import (
)
type InnerRing interface {
- InnerRingKeys() ([][]byte, error)
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
}
type SenderClassifier struct {
@@ -40,6 +41,7 @@ type ClassifyResult struct {
}
func (c SenderClassifier) Classify(
+ ctx context.Context,
ownerID *user.ID,
ownerKey *keys.PublicKey,
idCnr cid.ID,
@@ -57,15 +59,15 @@ func (c SenderClassifier) Classify(
}, nil
}
- return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
+ return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
}
-func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
- isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
+func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
+ isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
- c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
- zap.String("error", err.Error()))
+ c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
+ zap.Error(err))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
@@ -76,13 +78,13 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
binCnr := make([]byte, sha256.Size)
idCnr.Encode(binCnr)
- isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr)
+ isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr)
if err != nil {
// error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so
// do not throw error, try best case matching
- c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
- zap.String("error", err.Error()))
+ c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
+ zap.Error(err))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,
@@ -97,8 +99,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
}, nil
}
-func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
- innerRingKeys, err := c.innerRing.InnerRingKeys()
+func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) {
+ innerRingKeys, err := c.innerRing.InnerRingKeys(ctx)
if err != nil {
return false, err
}
@@ -114,10 +116,11 @@ func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
}
func (c SenderClassifier) isContainerKey(
+ ctx context.Context,
owner, idCnr []byte,
cnr container.Container,
) (bool, error) {
- nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
+ nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap
if err != nil {
return false, err
}
@@ -131,7 +134,7 @@ func (c SenderClassifier) isContainerKey(
// then check previous netmap, this can happen in-between epoch change
// when node migrates data from last epoch container
- nm, err = core.GetPreviousNetworkMap(c.netmap)
+ nm, err = core.GetPreviousNetworkMap(ctx, c.netmap)
if err != nil {
return false, err
}
diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go
index c4de07a5f..dfada764a 100644
--- a/pkg/innerring/bindings.go
+++ b/pkg/innerring/bindings.go
@@ -8,7 +8,6 @@ type (
// ContractProcessor interface defines functions for binding event producers
// such as event.Listener and Timers with contract processor.
ContractProcessor interface {
- ListenerNotificationParsers() []event.NotificationParserInfo
ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo
@@ -16,11 +15,6 @@ type (
)
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
- // register notification parsers
- for _, parser := range p.ListenerNotificationParsers() {
- l.SetNotificationParser(parser)
- }
-
// register notification handlers
for _, handler := range p.ListenerNotificationHandlers() {
l.RegisterNotificationHandler(handler)
diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go
index ad69f207b..3f9d8df5f 100644
--- a/pkg/innerring/blocktimer.go
+++ b/pkg/innerring/blocktimer.go
@@ -3,14 +3,10 @@ package innerring
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
- "go.uber.org/zap"
)
type (
@@ -19,28 +15,12 @@ type (
EpochDuration() uint64
}
- alphaState interface {
- IsAlphabet() bool
- }
-
newEpochHandler func()
- containerEstimationStopper interface {
- StopEstimation(p container.StopEstimationPrm) error
- }
-
epochTimerArgs struct {
- l *logger.Logger
-
- alphabetState alphaState
-
newEpochHandlers []newEpochHandler
- cnrWrapper containerEstimationStopper // to invoke stop container estimation
- epoch epochState // to specify which epoch to stop, and epoch duration
-
- stopEstimationDMul uint32 // X: X/Y of epoch in blocks
- stopEstimationDDiv uint32 // Y: X/Y of epoch in blocks
+ epoch epochState // to specify the epoch duration
}
emitTimerArgs struct {
@@ -49,7 +29,7 @@ type (
emitDuration uint32 // in blocks
}
- depositor func() (util.Uint256, error)
+ depositor func(context.Context) (util.Uint256, error)
awaiter func(context.Context, util.Uint256) error
)
@@ -74,7 +54,7 @@ func (s *Server) tickTimers(h uint32) {
}
func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
- epochTimer := timer.NewBlockTimer(
+ return timer.NewBlockTimer(
func() (uint32, error) {
return uint32(args.epoch.EpochDuration()), nil
},
@@ -84,42 +64,13 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
}
},
)
-
- // sub-timer for epoch timer to tick stop container estimation events at
- // some block in epoch
- epochTimer.OnDelta(
- args.stopEstimationDMul,
- args.stopEstimationDDiv,
- func() {
- if !args.alphabetState.IsAlphabet() {
- args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations)
- return
- }
-
- epochN := args.epoch.EpochCounter()
- if epochN == 0 { // estimates are invalid in genesis epoch
- return
- }
-
- prm := container.StopEstimationPrm{}
- prm.SetEpoch(epochN - 1)
-
- err := args.cnrWrapper.StopEstimation(prm)
- if err != nil {
- args.l.Warn(logs.InnerringCantStopEpochEstimation,
- zap.Uint64("epoch", epochN),
- zap.String("error", err.Error()))
- }
- })
-
- return epochTimer
}
-func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer {
+func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer {
return timer.NewBlockTimer(
timer.StaticBlockMeter(args.emitDuration),
func() {
- args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{})
+ args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{})
},
)
}
diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go
index 242c0903b..4cbe7e394 100644
--- a/pkg/innerring/blocktimer_test.go
+++ b/pkg/innerring/blocktimer_test.go
@@ -3,29 +3,20 @@ package innerring
import (
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
)
func TestEpochTimer(t *testing.T) {
t.Parallel()
- alphaState := &testAlphabetState{isAlphabet: true}
neh := &testNewEpochHandler{}
- cnrStopper := &testContainerEstStopper{}
epochState := &testEpochState{
counter: 99,
duration: 10,
}
args := &epochTimerArgs{
- l: test.NewLogger(t),
- alphabetState: alphaState,
- newEpochHandlers: []newEpochHandler{neh.Handle},
- cnrWrapper: cnrStopper,
- epoch: epochState,
- stopEstimationDMul: 2,
- stopEstimationDDiv: 10,
+ newEpochHandlers: []newEpochHandler{neh.Handle},
+ epoch: epochState,
}
et := newEpochTimer(args)
err := et.Reset()
@@ -33,63 +24,43 @@ func TestEpochTimer(t *testing.T) {
et.Tick(100)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 0, cnrStopper.called, "invalid container stop handler calls")
et.Tick(101)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(102)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(103)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
var h uint32
for h = 104; h < 109; h++ {
et.Tick(h)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
}
et.Tick(109)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(110)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(111)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
et.Tick(112)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
et.Tick(113)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
for h = 114; h < 119; h++ {
et.Tick(h)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
}
et.Tick(120)
require.Equal(t, 2, neh.called, "invalid new epoch handler calls")
- require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
-}
-
-type testAlphabetState struct {
- isAlphabet bool
-}
-
-func (s *testAlphabetState) IsAlphabet() bool {
- return s.isAlphabet
}
type testNewEpochHandler struct {
@@ -100,15 +71,6 @@ func (h *testNewEpochHandler) Handle() {
h.called++
}
-type testContainerEstStopper struct {
- called int
-}
-
-func (s *testContainerEstStopper) StopEstimation(_ container.StopEstimationPrm) error {
- s.called++
- return nil
-}
-
type testEpochState struct {
counter uint64
duration uint64
diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go
index 4a80ebf3b..7deec3f31 100644
--- a/pkg/innerring/fetcher.go
+++ b/pkg/innerring/fetcher.go
@@ -1,6 +1,8 @@
package innerring
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -47,12 +49,12 @@ type IrFetcherWithoutNotary struct {
// InnerRingKeys fetches list of innerring keys from NeoFSAlphabet
// role in the sidechain.
-func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) {
- return fN.cli.NeoFSAlphabetList()
+func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
+ return fN.cli.NeoFSAlphabetList(ctx)
}
// InnerRingKeys fetches list of innerring keys from netmap contract
// in the sidechain.
-func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) {
- return f.nm.GetInnerRingList()
+func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
+ return f.nm.GetInnerRingList(ctx)
}
diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go
index 45135a57b..439400bac 100644
--- a/pkg/innerring/indexer.go
+++ b/pkg/innerring/indexer.go
@@ -1,6 +1,7 @@
package innerring
import (
+ "context"
"fmt"
"sync"
"time"
@@ -10,7 +11,7 @@ import (
type (
irFetcher interface {
- InnerRingKeys() (keys.PublicKeys, error)
+ InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
}
committeeFetcher interface {
@@ -45,7 +46,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK
}
}
-func (s *innerRingIndexer) update() (ind indexes, err error) {
+func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) {
s.RLock()
if time.Since(s.lastAccess) < s.timeout {
@@ -62,7 +63,7 @@ func (s *innerRingIndexer) update() (ind indexes, err error) {
return s.ind, nil
}
- innerRing, err := s.irFetcher.InnerRingKeys()
+ innerRing, err := s.irFetcher.InnerRingKeys(ctx)
if err != nil {
return indexes{}, err
}
@@ -81,8 +82,8 @@ func (s *innerRingIndexer) update() (ind indexes, err error) {
return s.ind, nil
}
-func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
- ind, err := s.update()
+func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
+ ind, err := s.update(ctx)
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -90,8 +91,8 @@ func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
return ind.innerRingIndex, nil
}
-func (s *innerRingIndexer) InnerRingSize() (int32, error) {
- ind, err := s.update()
+func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
+ ind, err := s.update(ctx)
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -99,8 +100,8 @@ func (s *innerRingIndexer) InnerRingSize() (int32, error) {
return ind.innerRingSize, nil
}
-func (s *innerRingIndexer) AlphabetIndex() (int32, error) {
- ind, err := s.update()
+func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) {
+ ind, err := s.update(ctx)
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
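
Editorial note: `update(ctx)` keeps its TTL-cached fast path; only the fetchers become context-aware. A compilable sketch of the same double-checked locking pattern, with illustrative names rather than the actual `innerRingIndexer` fields:

```go
package indexercache

import (
	"context"
	"sync"
	"time"
)

// cachedIndex refreshes a value at most once per timeout window.
type cachedIndex struct {
	sync.RWMutex
	ind        int32
	lastAccess time.Time
	timeout    time.Duration
	fetch      func(ctx context.Context) (int32, error)
}

func (c *cachedIndex) get(ctx context.Context) (int32, error) {
	c.RLock()
	if time.Since(c.lastAccess) < c.timeout {
		defer c.RUnlock()
		return c.ind, nil
	}
	c.RUnlock()

	c.Lock()
	defer c.Unlock()

	// re-check under the write lock: another goroutine
	// may have refreshed the value while we were waiting
	if time.Since(c.lastAccess) < c.timeout {
		return c.ind, nil
	}

	ind, err := c.fetch(ctx)
	if err != nil {
		return 0, err
	}
	c.ind, c.lastAccess = ind, time.Now()
	return ind, nil
}
```
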
diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go
index c8a819b5b..f8201b7df 100644
--- a/pkg/innerring/indexer_test.go
+++ b/pkg/innerring/indexer_test.go
@@ -1,6 +1,7 @@
package innerring
import (
+ "context"
"fmt"
"sync/atomic"
"testing"
@@ -37,15 +38,15 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex()
+ idx, err := indexer.AlphabetIndex(context.Background())
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(2), idx, "invalid IR index")
- size, err := indexer.InnerRingSize()
+ size, err := indexer.InnerRingSize(context.Background())
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(3), size, "invalid IR size")
})
@@ -56,11 +57,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex()
+ idx, err := indexer.AlphabetIndex(context.Background())
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(0), idx, "invalid IR index")
})
@@ -71,11 +72,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex()
+ idx, err := indexer.AlphabetIndex(context.Background())
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
})
@@ -100,30 +101,30 @@ func TestIndexerCachesIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex()
+ idx, err := indexer.AlphabetIndex(context.Background())
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err := indexer.InnerRingSize()
+ size, err := indexer.InnerRingSize(context.Background())
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count")
require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count")
- idx, err = indexer.AlphabetIndex()
+ idx, err = indexer.AlphabetIndex(context.Background())
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err = indexer.InnerRingSize()
+ size, err = indexer.InnerRingSize(context.Background())
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -132,15 +133,15 @@ func TestIndexerCachesIndexes(t *testing.T) {
time.Sleep(2 * time.Second)
- idx, err = indexer.AlphabetIndex()
+ idx, err = indexer.AlphabetIndex(context.Background())
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err = indexer.InnerRingSize()
+ size, err = indexer.InnerRingSize(context.Background())
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -165,15 +166,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex()
+ idx, err := indexer.AlphabetIndex(context.Background())
require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index")
- size, err := indexer.InnerRingSize()
+ size, err := indexer.InnerRingSize(context.Background())
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -189,15 +190,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer = newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err = indexer.AlphabetIndex()
+ idx, err = indexer.AlphabetIndex(context.Background())
require.ErrorContains(t, err, "test IR error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex()
+ idx, err = indexer.InnerRingIndex(context.Background())
require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index")
- size, err = indexer.InnerRingSize()
+ size, err = indexer.InnerRingSize(context.Background())
require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size")
}
@@ -219,7 +220,7 @@ type testIRFetcher struct {
calls atomic.Int32
}
-func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
f.calls.Add(1)
return f.keys, f.err
}
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 7da0a9794..3d236641e 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -35,31 +35,30 @@ import (
"google.golang.org/grpc"
)
-func (s *Server) initNetmapProcessor(cfg *viper.Viper,
- cnrClient *container.Client,
+func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
alphaSync event.Handler,
) error {
- locodeValidator, err := s.newLocodeValidator(cfg)
- if err != nil {
- return err
- }
+ locodeValidator := s.newLocodeValidator(cfg)
netSettings := (*networkSettings)(s.netmapClient)
var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
netMapCandidateStateValidator.SetNetworkSettings(netSettings)
+ poolSize := cfg.GetInt("workers.netmap")
+ s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
+
+ var err error
s.netmapProcessor, err = netmap.New(&netmap.Params{
- Log: s.log,
+ Log: s.log.WithTag(logger.TagProcessor),
Metrics: s.irMetrics,
- PoolSize: cfg.GetInt("workers.netmap"),
+ PoolSize: poolSize,
NetmapClient: netmap.NewNetmapClient(s.netmapClient),
EpochTimer: s,
EpochState: s,
AlphabetState: s,
CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"),
CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"),
- ContainerWrapper: cnrClient,
NotaryDepositHandler: s.onlyAlphabetEventHandler(
s.notaryHandler,
),
@@ -99,7 +98,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
- s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
}
mainnetChain.from = fromMainChainBlock
@@ -139,12 +138,12 @@ func (s *Server) enableNotarySupport() error {
return nil
}
-func (s *Server) initNotaryConfig() {
+func (s *Server) initNotaryConfig(ctx context.Context) {
s.mainNotaryConfig = notaryConfigs(
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
)
- s.log.Info(logs.InnerringNotarySupport,
+ s.log.Info(ctx, logs.InnerringNotarySupport,
zap.Bool("sidechain_enabled", true),
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
)
@@ -154,16 +153,15 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
var alphaSync event.Handler
if s.withoutMainNet || cfg.GetBool("governance.disable") {
- alphaSync = func(event.Event) {
- s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
+ alphaSync = func(ctx context.Context, _ event.Event) {
+ s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled)
}
} else {
// create governance processor
governanceProcessor, err := governance.New(&governance.Params{
- Log: s.log,
+ Log: s.log.WithTag(logger.TagProcessor),
Metrics: s.irMetrics,
FrostFSClient: frostfsCli,
- NetmapClient: s.netmapClient,
AlphabetState: s,
EpochState: s,
Voter: s,
@@ -199,21 +197,16 @@ func (s *Server) createIRFetcher() irFetcher {
return irf
}
-func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients) {
+func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
s.epochTimer = newEpochTimer(&epochTimerArgs{
- l: s.log,
- alphabetState: s,
- newEpochHandlers: s.newEpochTickHandlers(),
- cnrWrapper: morphClients.CnrClient,
- epoch: s,
- stopEstimationDMul: cfg.GetUint32("timers.stop_estimation.mul"),
- stopEstimationDDiv: cfg.GetUint32("timers.stop_estimation.div"),
+ newEpochHandlers: s.newEpochTickHandlers(ctx),
+ epoch: s,
})
s.addBlockTimer(s.epochTimer)
// initialize emission timer
- emissionTimer := newEmissionTimer(&emitTimerArgs{
+ emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{
ap: s.alphabetProcessor,
emitDuration: cfg.GetUint32("timers.emit"),
})
@@ -221,18 +214,20 @@ func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients)
s.addBlockTimer(emissionTimer)
}
-func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
+func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error {
parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
if err != nil {
return err
}
+ poolSize := cfg.GetInt("workers.alphabet")
+ s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize))
// create alphabet processor
s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
ParsedWallets: parsedWallets,
- Log: s.log,
+ Log: s.log.WithTag(logger.TagProcessor),
Metrics: s.irMetrics,
- PoolSize: cfg.GetInt("workers.alphabet"),
+ PoolSize: poolSize,
AlphabetContracts: s.contracts.alphabet,
NetmapClient: s.netmapClient,
MorphClient: s.morphClient,
@@ -247,12 +242,14 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
return err
}
-func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
+func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
+ poolSize := cfg.GetInt("workers.container")
+ s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
// container processor
containerProcessor, err := cont.New(&cont.Params{
- Log: s.log,
+ Log: s.log.WithTag(logger.TagProcessor),
Metrics: s.irMetrics,
- PoolSize: cfg.GetInt("workers.container"),
+ PoolSize: poolSize,
AlphabetState: s,
ContainerClient: cnrClient,
MorphClient: cnrClient.Morph(),
@@ -266,12 +263,14 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C
return bindMorphProcessor(containerProcessor, s)
}
-func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
+func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
+ poolSize := cfg.GetInt("workers.balance")
+ s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
// create balance processor
balanceProcessor, err := balance.New(&balance.Params{
- Log: s.log,
+ Log: s.log.WithTag(logger.TagProcessor),
Metrics: s.irMetrics,
- PoolSize: cfg.GetInt("workers.balance"),
+ PoolSize: poolSize,
FrostFSClient: frostfsCli,
BalanceSC: s.contracts.balance,
AlphabetState: s,
@@ -284,15 +283,17 @@ func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClien
return bindMorphProcessor(balanceProcessor, s)
}
-func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
+func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error {
if s.withoutMainNet {
return nil
}
+ poolSize := cfg.GetInt("workers.frostfs")
+ s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
frostfsProcessor, err := frostfs.New(&frostfs.Params{
- Log: s.log,
+ Log: s.log.WithTag(logger.TagProcessor),
Metrics: s.irMetrics,
- PoolSize: cfg.GetInt("workers.frostfs"),
+ PoolSize: poolSize,
FrostFSContract: s.contracts.frostfs,
BalanceClient: s.balanceClient,
NetmapClient: s.netmapClient,
@@ -312,10 +313,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
return bindMainnetProcessor(frostfsProcessor, s)
}
-func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
+func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
- s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
+ s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified)
return nil
}
@@ -341,7 +342,7 @@ func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *ato
controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient,
controlsrv.WithAllowedKeys(authKeys),
- ), log, audit)
+ ), log.WithTag(logger.TagGrpcSvc), audit)
grpcControlSrv := grpc.NewServer()
control.RegisterControlServiceServer(grpcControlSrv, controlSvc)
@@ -377,7 +378,6 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
// form morph container client's options
morphCnrOpts := make([]container.Option, 0, 3)
morphCnrOpts = append(morphCnrOpts,
- container.TryNotary(),
container.AsAlphabet(),
)
@@ -387,12 +387,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
}
s.containerClient = result.CnrClient
- s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
+ s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet())
if err != nil {
return nil, err
}
- s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
+ s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet())
if err != nil {
return nil, err
}
@@ -411,7 +411,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
return result, nil
}
-func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error {
+func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error {
irf := s.createIRFetcher()
s.statusIndex = newInnerRingIndexer(
@@ -426,27 +426,27 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien
return err
}
- err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync)
+ err = s.initNetmapProcessor(ctx, cfg, alphaSync)
if err != nil {
return err
}
- err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
+ err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
if err != nil {
return err
}
- err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient)
+ err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient)
if err != nil {
return err
}
- err = s.initFrostFSMainnetProcessor(cfg)
+ err = s.initFrostFSMainnetProcessor(ctx, cfg)
if err != nil {
return err
}
- err = s.initAlphabetProcessor(cfg)
+ err = s.initAlphabetProcessor(ctx, cfg)
return err
}
@@ -454,16 +454,17 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
}
morphChain := &chainParams{
- log: s.log,
+ log: s.log.WithTag(logger.TagMorph),
cfg: cfg,
key: s.key,
name: morphPrefix,
from: fromSideChainBlock,
morphCacheMetric: s.irMetrics.MorphCacheMetrics(),
+ multinetMetrics: s.irMetrics.Multinet(),
}
// create morph client
@@ -478,7 +479,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
return nil, err
}
if err := s.morphClient.SetGroupSignerScope(); err != nil {
- morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
+ morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
}
return morphChain, nil
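
Editorial note: throughout `initialization.go` the pool-size debug logs move out of the processor constructors into the `Server` init methods, so the size is read once, logged, and passed on. A minimal sketch of that shape, using the `workers.netmap` key from the hunk and a plain zap logger standing in for the tagged one:

```go
package main

import (
	"github.com/spf13/viper"
	"go.uber.org/zap"
)

func main() {
	cfg := viper.New()
	cfg.SetDefault("workers.netmap", 10)

	log, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	defer func() { _ = log.Sync() }()

	poolSize := cfg.GetInt("workers.netmap")
	log.Debug("netmap worker pool", zap.Int("size", poolSize))
	// poolSize is then handed to the processor constructor,
	// e.g. netmap.New(&netmap.Params{PoolSize: poolSize, ...})
}
```
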
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 50a37845b..3a5137261 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
@@ -103,6 +104,8 @@ type (
// to the application.
runners []func(chan<- error) error
+ // cmode is used for the upgrade scenario.
+ // nolint:unused
cmode *atomic.Bool
}
@@ -114,6 +117,7 @@ type (
sgn *transaction.Signer
from uint32 // block height
morphCacheMetric metrics.MorphCacheMetrics
+ multinetMetrics metrics.MultinetMetrics
}
)
@@ -136,10 +140,10 @@ var (
// Start runs all event providers.
func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
- s.setHealthStatus(control.HealthStatus_STARTING)
+ s.setHealthStatus(ctx, control.HealthStatus_STARTING)
defer func() {
if err == nil {
- s.setHealthStatus(control.HealthStatus_READY)
+ s.setHealthStatus(ctx, control.HealthStatus_READY)
}
}()
@@ -148,12 +152,12 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
return err
}
- err = s.initConfigFromBlockchain()
+ err = s.initConfigFromBlockchain(ctx)
if err != nil {
return err
}
- if s.IsAlphabet() {
+ if s.IsAlphabet(ctx) {
err = s.initMainNotary(ctx)
if err != nil {
return err
@@ -169,14 +173,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
prm.Validators = s.predefinedValidators
// vote for sidechain validator if it is prepared in config
- err = s.voteForSidechainValidator(prm)
+ err = s.voteForSidechainValidator(ctx, prm)
if err != nil {
// we don't stop inner ring execution on this error
- s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
- zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
+ zap.Error(err))
}
- s.tickInitialExpoch()
+ s.tickInitialExpoch(ctx)
morphErr := make(chan error)
mainnnetErr := make(chan error)
@@ -213,14 +217,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
}
func (s *Server) registerMorphNewBlockEventHandler() {
- s.morphListener.RegisterBlockHandler(func(b *block.Block) {
- s.log.Debug(logs.InnerringNewBlock,
+ s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
+ s.log.Debug(ctx, logs.InnerringNewBlock,
zap.Uint32("index", b.Index),
)
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn(logs.InnerringCantUpdatePersistentState,
+ s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", b.Index))
}
@@ -231,10 +235,10 @@ func (s *Server) registerMorphNewBlockEventHandler() {
func (s *Server) registerMainnetNewBlockEventHandler() {
if !s.withoutMainNet {
- s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
+ s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn(logs.InnerringCantUpdatePersistentState,
+ s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
zap.String("chain", "main"),
zap.Uint32("block_index", b.Index))
}
@@ -279,11 +283,11 @@ func (s *Server) initSideNotary(ctx context.Context) error {
)
}
-func (s *Server) tickInitialExpoch() {
+func (s *Server) tickInitialExpoch(ctx context.Context) {
initialEpochTicker := timer.NewOneTickTimer(
timer.StaticBlockMeter(s.initialEpochTickDelta),
func() {
- s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
+ s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
})
s.addBlockTimer(initialEpochTicker)
}
@@ -295,16 +299,16 @@ func (s *Server) startWorkers(ctx context.Context) {
}
// Stop closes all subscription channels.
-func (s *Server) Stop() {
- s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
+func (s *Server) Stop(ctx context.Context) {
+ s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
go s.morphListener.Stop()
go s.mainnetListener.Stop()
for _, c := range s.closers {
if err := c(); err != nil {
- s.log.Warn(logs.InnerringCloserError,
- zap.String("error", err.Error()),
+ s.log.Warn(ctx, logs.InnerringCloserError,
+ zap.Error(err),
)
}
}
@@ -335,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
) (*Server, error) {
var err error
server := &Server{
- log: log,
+ log: log.WithTag(logger.TagIr),
irMetrics: metrics,
cmode: cmode,
}
@@ -345,7 +349,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
- server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED)
+ server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED)
// parse notary support
server.feeConfig = config.NewFeeConfig(cfg)
@@ -372,7 +376,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
- server.initNotaryConfig()
+ server.initNotaryConfig(ctx)
err = server.initContracts(cfg)
if err != nil {
@@ -396,14 +400,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
- err = server.initProcessors(cfg, morphClients)
+ err = server.initProcessors(ctx, cfg, morphClients)
if err != nil {
return nil, err
}
- server.initTimers(cfg, morphClients)
+ server.initTimers(ctx, cfg)
- err = server.initGRPCServer(cfg, log, audit)
+ err = server.initGRPCServer(ctx, cfg, log, audit)
if err != nil {
return nil, err
}
@@ -434,7 +438,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
}
listener, err := event.NewListener(event.ListenerParams{
- Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
+ Logger: p.log.With(zap.String("chain", p.name)),
Subscriber: sub,
})
if err != nil {
@@ -484,6 +488,12 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
return nil, fmt.Errorf("%s chain client endpoints not provided", p.name)
}
+ nc := parseMultinetConfig(p.cfg, p.multinetMetrics)
+ ds, err := internalNet.NewDialerSource(nc)
+ if err != nil {
+ return nil, fmt.Errorf("dialer source: %w", err)
+ }
+
return client.New(
ctx,
p.key,
@@ -496,6 +506,7 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
}),
client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
client.WithMorphCacheMetrics(p.morphCacheMetric),
+ client.WithDialerSource(ds),
)
}
@@ -540,21 +551,43 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) {
return extraWallets, nil
}
-func (s *Server) initConfigFromBlockchain() error {
+func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config {
+ nc := internalNet.Config{
+ Enabled: cfg.GetBool("multinet.enabled"),
+ Balancer: cfg.GetString("multinet.balancer"),
+ Restrict: cfg.GetBool("multinet.restrict"),
+ FallbackDelay: cfg.GetDuration("multinet.fallback_delay"),
+ Metrics: m,
+ }
+ for i := 0; ; i++ {
+ mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i))
+ if mask == "" {
+ break
+ }
+ sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i))
+ nc.Subnets = append(nc.Subnets, internalNet.Subnet{
+ Prefix: mask,
+ SourceIPs: sourceIPs,
+ })
+ }
+ return nc
+}
+
+func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
// get current epoch
- epoch, err := s.netmapClient.Epoch()
+ epoch, err := s.netmapClient.Epoch(ctx)
if err != nil {
return fmt.Errorf("can't read epoch number: %w", err)
}
// get current epoch duration
- epochDuration, err := s.netmapClient.EpochDuration()
+ epochDuration, err := s.netmapClient.EpochDuration(ctx)
if err != nil {
return fmt.Errorf("can't read epoch duration: %w", err)
}
// get balance precision
- balancePrecision, err := s.balanceClient.Decimals()
+ balancePrecision, err := s.balanceClient.Decimals(ctx)
if err != nil {
return fmt.Errorf("can't read balance contract precision: %w", err)
}
@@ -564,14 +597,14 @@ func (s *Server) initConfigFromBlockchain() error {
s.precision.SetBalancePrecision(balancePrecision)
// get next epoch delta tick
- s.initialEpochTickDelta, err = s.nextEpochBlockDelta()
+ s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx)
if err != nil {
return err
}
- s.log.Debug(logs.InnerringReadConfigFromBlockchain,
- zap.Bool("active", s.IsActive()),
- zap.Bool("alphabet", s.IsAlphabet()),
+ s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain,
+ zap.Bool("active", s.IsActive(ctx)),
+ zap.Bool("alphabet", s.IsAlphabet(ctx)),
zap.Uint64("epoch", epoch),
zap.Uint32("precision", balancePrecision),
zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta),
@@ -580,8 +613,8 @@ func (s *Server) initConfigFromBlockchain() error {
return nil
}
-func (s *Server) nextEpochBlockDelta() (uint32, error) {
- epochBlock, err := s.netmapClient.LastEpochBlock()
+func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) {
+ epochBlock, err := s.netmapClient.LastEpochBlock(ctx)
if err != nil {
return 0, fmt.Errorf("can't read last epoch block: %w", err)
}
@@ -602,17 +635,17 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) {
// onlyAlphabet wrapper around event handler that executes it
// only if inner ring node is alphabet node.
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler {
- return func(ev event.Event) {
- if s.IsAlphabet() {
- f(ev)
+ return func(ctx context.Context, ev event.Event) {
+ if s.IsAlphabet(ctx) {
+ f(ctx, ev)
}
}
}
-func (s *Server) newEpochTickHandlers() []newEpochHandler {
+func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler {
newEpochHandlers := []newEpochHandler{
func() {
- s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
+ s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
},
}
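
Editorial note: the new `parseMultinetConfig` probes indexed `multinet.subnets.%d.*` keys until the first empty mask. A self-contained sketch of that enumeration against a hypothetical config (values below are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("multinet.enabled", true)
	v.Set("multinet.balancer", "roundrobin")
	v.Set("multinet.fallback_delay", "350ms")
	v.Set("multinet.subnets.0.mask", "192.168.219.0/24")
	v.Set("multinet.subnets.0.source_ips", []string{"192.168.218.1"})

	fmt.Println(v.GetBool("multinet.enabled"),
		v.GetString("multinet.balancer"),
		v.GetDuration("multinet.fallback_delay"))

	for i := 0; ; i++ {
		mask := v.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i))
		if mask == "" {
			break // enumeration stops at the first missing entry
		}
		fmt.Println(mask, v.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i)))
	}
}
```
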
diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go
index a0c3ea751..ae4c85168 100644
--- a/pkg/innerring/locode.go
+++ b/pkg/innerring/locode.go
@@ -9,7 +9,7 @@ import (
"github.com/spf13/viper"
)
-func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
+func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
locodeDB := locodebolt.New(locodebolt.Prm{
Path: cfg.GetString("locode.db.path"),
},
@@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, err
return irlocode.New(irlocode.Prm{
DB: (*locodeBoltDBWrapper)(locodeDB),
- }), nil
+ })
}
type locodeBoltEntryWrapper struct {
diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go
index 9961710ca..fb11e9426 100644
--- a/pkg/innerring/netmap.go
+++ b/pkg/innerring/netmap.go
@@ -1,6 +1,7 @@
package innerring
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -17,8 +18,8 @@ type networkSettings netmapclient.Client
// MaintenanceModeAllowed requests network configuration from the Sidechain
// and check allowance of storage node's maintenance mode according to it.
// Always returns state.ErrMaintenanceModeDisallowed.
-func (s *networkSettings) MaintenanceModeAllowed() error {
- allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed()
+func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error {
+ allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx)
if err != nil {
return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
} else if allowed {
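
Editorial note: `networkSettings` is a named type over `netmapclient.Client`, and the receiver is converted back with `(*netmapclient.Client)(s)` to call through. A sketch of this zero-allocation adapter idiom with simplified stand-in types:

```go
package adapter

import (
	"context"
	"errors"
)

// Client is a stand-in for netmapclient.Client.
type Client struct{ allowed bool }

func (c *Client) MaintenanceModeAllowed(_ context.Context) (bool, error) {
	return c.allowed, nil
}

// settings gives Client a validator-facing error-returning method
// without wrapping it in a new struct: conversion between the two
// pointer types is free because the memory layout is identical.
type settings Client

func (s *settings) MaintenanceModeAllowed(ctx context.Context) error {
	allowed, err := (*Client)(s).MaintenanceModeAllowed(ctx)
	if err != nil {
		return err
	}
	if !allowed {
		return errors.New("maintenance mode is disallowed")
	}
	return nil
}
```
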
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index e6f2b1de4..c8a69962f 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -28,37 +28,39 @@ const (
gasDivisor = 2
)
-func (s *Server) depositMainNotary() (tx util.Uint256, err error) {
+func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
}
return s.mainnetClient.DepositNotary(
+ ctx,
depositAmount,
uint32(s.epochDuration.Load())+notaryExtraBlocks,
)
}
-func (s *Server) depositSideNotary() (tx util.Uint256, err error) {
+func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
}
- return s.morphClient.DepositEndlessNotary(depositAmount)
+ tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
+ return tx, err
}
-func (s *Server) notaryHandler(_ event.Event) {
+func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
if !s.mainNotaryConfig.disabled {
- _, err := s.depositMainNotary()
+ _, err := s.depositMainNotary(ctx)
if err != nil {
- s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
+ s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
}
}
- if _, err := s.depositSideNotary(); err != nil {
- s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
+ if _, err := s.depositSideNotary(ctx); err != nil {
+ s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
}
}
@@ -71,7 +73,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er
}
func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
- tx, err := deposit()
+ tx, err := deposit(ctx)
if err != nil {
return err
}
@@ -80,11 +82,11 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
- s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
+ s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
return nil
}
- s.log.Info(msg)
+ s.log.Info(ctx, msg)
return await(ctx, tx)
}
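
Editorial note: `initNotary` keeps its deposit-then-await flow; the change threads the caller's context into the depositor. A simplified, compilable restatement of that flow, with `[32]byte` standing in for `util.Uint256`:

```go
package notaryflow

import (
	"context"
	"fmt"
)

type (
	depositor func(context.Context) ([32]byte, error)
	awaiter   func(context.Context, [32]byte) error
)

func initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
	tx, err := deposit(ctx)
	if err != nil {
		return err
	}

	if tx == ([32]byte{}) {
		// an empty hash means the deposit has already been
		// made, so there is nothing to wait for
		fmt.Println("notary deposit has already been made")
		return nil
	}

	fmt.Println(msg)
	return await(ctx, tx)
}
```
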
diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go
index 9de075f17..d6b653282 100644
--- a/pkg/innerring/processors/alphabet/handlers.go
+++ b/pkg/innerring/processors/alphabet/handlers.go
@@ -1,6 +1,8 @@
package alphabet
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
@@ -8,16 +10,16 @@ import (
"go.uber.org/zap"
)
-func (ap *Processor) HandleGasEmission(ev event.Event) {
+func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
- ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
+ ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
- err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
+ err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
if err != nil {
// there system can be moved into controlled degradation stage
- ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
+ ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
zap.Int("capacity", ap.pool.Cap()))
}
}
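
Editorial note: `HandleGasEmission` now wraps `processEmit(ctx)` in a closure before handing it to the pool. A minimal sketch of that handoff with a nonblocking ants pool (the real code goes through `processors.SubmitEvent`, which also records metrics):

```go
package main

import (
	"context"
	"fmt"

	"github.com/panjf2000/ants/v2"
)

func main() {
	pool, err := ants.NewPool(4, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	ctx := context.Background()
	process := func(ctx context.Context) bool { return true } // stands in for ap.processEmit

	// the closure captures ctx, so the pooled task stays context-aware
	if err := pool.Submit(func() { _ = process(ctx) }); err != nil {
		// a nonblocking pool returns ants.ErrPoolOverload instead of queueing
		fmt.Println("worker pool drained:", err)
	}
}
```
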
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
index dfda37472..1da3c401d 100644
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -1,11 +1,13 @@
package alphabet_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -60,7 +62,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -95,7 +97,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160 = []util.Uint160{}
+ var parsedWallets []util.Uint160
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -137,7 +139,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -167,7 +169,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160 = []util.Uint160{}
+ var parsedWallets []util.Uint160
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -176,7 +178,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
morphClient := &testMorphClient{}
- nodes := []netmap.NodeInfo{}
+ var nodes []netmap.NodeInfo
network := &netmap.NetMap{}
network.SetNodes(nodes)
@@ -198,7 +200,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -219,7 +221,7 @@ type testIndexer struct {
index int
}
-func (i *testIndexer) AlphabetIndex() int {
+func (i *testIndexer) AlphabetIndex(context.Context) int {
return i.index
}
@@ -246,7 +248,7 @@ type testMorphClient struct {
batchTransferedGas []batchTransferGas
}
-func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
+func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) {
c.invokedMethods = append(c.invokedMethods,
invokedMethod{
contract: contract,
@@ -254,7 +256,7 @@ func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, metho
method: method,
args: args,
})
- return 0, nil
+ return client.InvokeRes{}, nil
}
func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
@@ -277,6 +279,6 @@ type testNetmapClient struct {
netmap *netmap.NetMap
}
-func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
return c.netmap, nil
}
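
Editorial note: the test tweaks replace `[]util.Uint160{}` literals with plain `var` declarations. A quick demonstration that a nil slice and an empty slice behave the same for `len` and `append` (they differ only in the nil comparison), which is why the shorter form is preferred:

```go
package main

import "fmt"

func main() {
	var a []int  // nil slice
	b := []int{} // empty, non-nil slice

	fmt.Println(len(a) == len(b))   // true: both are 0
	fmt.Println(a == nil, b == nil) // true false
	a = append(a, 1)                // append works on nil slices
	fmt.Println(a)                  // [1]
}
```
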
diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go
index 2317f3e98..d3d0f83f2 100644
--- a/pkg/innerring/processors/alphabet/process_emit.go
+++ b/pkg/innerring/processors/alphabet/process_emit.go
@@ -1,6 +1,7 @@
package alphabet
import (
+ "context"
"crypto/elliptic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -13,40 +14,40 @@ import (
const emitMethod = "emit"
-func (ap *Processor) processEmit() bool {
- index := ap.irList.AlphabetIndex()
+func (ap *Processor) processEmit(ctx context.Context) bool {
+ index := ap.irList.AlphabetIndex(ctx)
if index < 0 {
- ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
+ ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
return true
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
- ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
+ ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
zap.Int("index", index))
return false
}
// there is no signature collecting, so we don't need extra fee
- _, err := ap.morphClient.Invoke(contract, 0, emitMethod)
+ _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil {
- ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
+ ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
return false
}
if ap.storageEmission == 0 {
- ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
+ ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
return true
}
- networkMap, err := ap.netmapClient.NetMap()
+ networkMap, err := ap.netmapClient.NetMap(ctx)
if err != nil {
- ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
- zap.String("error", err.Error()))
+ ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
+ zap.Error(err))
return false
}
@@ -58,7 +59,7 @@ func (ap *Processor) processEmit() bool {
ap.pwLock.RUnlock()
extraLen := len(pw)
- ap.log.Debug(logs.AlphabetGasEmission,
+ ap.log.Debug(ctx, logs.AlphabetGasEmission,
zap.Int("network_map", nmLen),
zap.Int("extra_wallets", extraLen))
@@ -68,37 +69,37 @@ func (ap *Processor) processEmit() bool {
gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
- ap.transferGasToNetmapNodes(nmNodes, gasPerNode)
+ ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
- ap.transferGasToExtraNodes(pw, gasPerNode)
+ ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
return true
}
-func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
for i := range nmNodes {
keyBytes := nmNodes[i].PublicKey()
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
- ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
- zap.String("error", err.Error()))
+ ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
+ zap.Error(err))
continue
}
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
- ap.log.Warn(logs.AlphabetCantTransferGas,
+ ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
}
}
-func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
if len(pw) > 0 {
err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
if err != nil {
@@ -106,10 +107,10 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed
for i, addr := range pw {
receiversLog[i] = addr.StringLE()
}
- ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
+ ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
}
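
Editorial note: a worked example of the split in `processEmit`: the configured storage emission is divided evenly across netmap nodes plus extra wallets (the numbers below are illustrative):

```go
package main

import "fmt"

func main() {
	const storageEmission uint64 = 100_000 // configured total, in Fixed8 units
	nmLen, extraLen := 4, 1                // netmap nodes + extra wallets

	if nmLen+extraLen == 0 {
		return // processEmit logs and bails out before dividing
	}
	gasPerNode := storageEmission / uint64(nmLen+extraLen)
	fmt.Println(gasPerNode) // 20000 per receiver; integer remainder is not emitted
}
```
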
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
index ce6679969..0aea74003 100644
--- a/pkg/innerring/processors/alphabet/processor.go
+++ b/pkg/innerring/processors/alphabet/processor.go
@@ -1,26 +1,26 @@
package alphabet
import (
+ "context"
"errors"
"fmt"
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
// Indexer is a callback interface for inner ring global state.
Indexer interface {
- AlphabetIndex() int
+ AlphabetIndex(context.Context) int
}
// Contracts is an interface of the storage
@@ -36,11 +36,11 @@ type (
}
netmapClient interface {
- NetMap() (*netmap.NetMap, error)
+ NetMap(ctx context.Context) (*netmap.NetMap, error)
}
morphClient interface {
- Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
+ Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error)
TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
}
@@ -85,8 +85,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
- p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
@@ -116,11 +114,6 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
ap.pwLock.Unlock()
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- return nil
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
index e39f3abbd..b5d05a02e 100644
--- a/pkg/innerring/processors/balance/handlers.go
+++ b/pkg/innerring/processors/balance/handlers.go
@@ -1,6 +1,7 @@
package balance
import (
+ "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -10,20 +11,20 @@ import (
"go.uber.org/zap"
)
-func (bp *Processor) handleLock(ev event.Event) {
+func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
lock := ev.(balanceEvent.Lock)
- bp.log.Info(logs.Notification,
+ bp.log.Info(ctx, logs.Notification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
// send an event to the worker pool
err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
- return bp.processLock(&lock)
+ return bp.processLock(ctx, &lock)
})
if err != nil {
// there system can be moved into controlled degradation stage
- bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
+ bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained,
zap.Int("capacity", bp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go
index 86a9e15d0..0fd23d8ab 100644
--- a/pkg/innerring/processors/balance/handlers_test.go
+++ b/pkg/innerring/processors/balance/handlers_test.go
@@ -1,6 +1,7 @@
package balance
import (
+ "context"
"testing"
"time"
@@ -30,7 +31,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
- processor.handleLock(balanceEvent.Lock{})
+ processor.handleLock(context.Background(), balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -56,7 +57,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
- processor.handleLock(balanceEvent.Lock{})
+ processor.handleLock(context.Background(), balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -69,7 +70,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@@ -83,7 +84,7 @@ type testFrostFSContractClient struct {
chequeCalls int
}
-func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error {
+func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
c.chequeCalls++
return nil
}
diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go
index 1d94fa454..60475908c 100644
--- a/pkg/innerring/processors/balance/process_assets.go
+++ b/pkg/innerring/processors/balance/process_assets.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@@ -9,9 +11,9 @@ import (
// Process lock event by invoking Cheque method in main net to send assets
// back to the withdraw issuer.
-func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
- if !bp.alphabetState.IsAlphabet() {
- bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
+func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
+ if !bp.alphabetState.IsAlphabet(ctx) {
+ bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
return true
}
@@ -23,9 +25,9 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
prm.SetLock(lock.LockAccount())
prm.SetHash(lock.TxHash())
- err := bp.frostfsClient.Cheque(prm)
+ err := bp.frostfsClient.Cheque(ctx, prm)
if err != nil {
- bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
+ bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
index 5cc849b5c..34203b74f 100644
--- a/pkg/innerring/processors/balance/processor.go
+++ b/pkg/innerring/processors/balance/processor.go
@@ -1,10 +1,10 @@
package balance
import (
+ "context"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -12,13 +12,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@@ -27,7 +26,7 @@ type (
}
FrostFSClient interface {
- Cheque(p frostfscontract.ChequePrm) error
+ Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
}
// Processor of events produced by balance contract in the morphchain.
@@ -68,8 +67,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
- p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)
@@ -91,32 +88,16 @@ func New(p *Params) (*Processor, error) {
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- var parsers []event.NotificationParserInfo
-
- // new lock event
- lock := event.NotificationParserInfo{}
- lock.SetType(lockNotification)
- lock.SetScriptHash(bp.balanceSC)
- lock.SetParser(balanceEvent.ParseLock)
- parsers = append(parsers, lock)
-
- return parsers
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- var handlers []event.NotificationHandlerInfo
-
- // lock handler
- lock := event.NotificationHandlerInfo{}
- lock.SetType(lockNotification)
- lock.SetScriptHash(bp.balanceSC)
- lock.SetHandler(bp.handleLock)
- handlers = append(handlers, lock)
-
- return handlers
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: bp.balanceSC,
+ Type: lockNotification,
+ Parser: balanceEvent.ParseLock,
+ Handlers: []event.Handler{bp.handleLock},
+ },
+ }
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
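
Editorial note: the refactor replaces the parallel setter-built parser and handler lists with a single struct literal carrying contract, type, parser, and handlers together. A sketch of that registration shape with simplified stand-in types (not the actual `event` package):

```go
package events

import "context"

type (
	Event   any
	Handler func(context.Context, Event)
	Parser  func(raw []byte) (Event, error)

	// HandlerInfo mirrors the refactored event.NotificationHandlerInfo.
	HandlerInfo struct {
		Contract [20]byte
		Type     string
		Parser   Parser
		Handlers []Handler
	}
)

type balanceProcessor struct {
	balanceSC  [20]byte
	parseLock  Parser
	handleLock Handler
}

// ListenerNotificationHandlers returns one record per notification type,
// keeping the parser and its handlers in a single place.
func (bp *balanceProcessor) ListenerNotificationHandlers() []HandlerInfo {
	return []HandlerInfo{{
		Contract: bp.balanceSC,
		Type:     "Lock",
		Parser:   bp.parseLock,
		Handlers: []Handler{bp.handleLock},
	}}
}
```
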
diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go
index ba12ebb37..5334b9a1f 100644
--- a/pkg/innerring/processors/container/common.go
+++ b/pkg/innerring/processors/container/common.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/ecdsa"
"errors"
"fmt"
@@ -45,7 +46,7 @@ type signatureVerificationData struct {
// - v.binPublicKey is a public session key
// - session context corresponds to the container and verb in v
// - session is "alive"
-func (cp *Processor) verifySignature(v signatureVerificationData) error {
+func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error {
var err error
var key frostfsecdsa.PublicKeyRFC6979
keyProvided := v.binPublicKey != nil
@@ -58,7 +59,7 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error {
}
if len(v.binTokenSession) > 0 {
- return cp.verifyByTokenSession(v, &key, keyProvided)
+ return cp.verifyByTokenSession(ctx, v, &key, keyProvided)
}
if keyProvided {
@@ -77,8 +78,8 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error {
return errors.New("signature is invalid or calculated with the key not bound to the container owner")
}
-func (cp *Processor) checkTokenLifetime(token session.Container) error {
- curEpoch, err := cp.netState.Epoch()
+func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error {
+ curEpoch, err := cp.netState.Epoch(ctx)
if err != nil {
return fmt.Errorf("could not read current epoch: %w", err)
}
@@ -90,7 +91,7 @@ func (cp *Processor) checkTokenLifetime(token session.Container) error {
return nil
}
-func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
+func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
var tok session.Container
err := tok.Unmarshal(v.binTokenSession)
@@ -118,7 +119,7 @@ func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *fros
return errors.New("owner differs with token owner")
}
- err = cp.checkTokenLifetime(tok)
+ err = cp.checkTokenLifetime(ctx, tok)
if err != nil {
return fmt.Errorf("check session lifetime: %w", err)
}
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index 8bb89abe2..bb038a3cb 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -11,44 +12,40 @@ import (
"go.uber.org/zap"
)
-func (cp *Processor) handlePut(ev event.Event) {
+func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
- cp.log.Info(logs.Notification,
+ cp.log.Info(ctx, logs.Notification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
- return cp.processContainerPut(put)
+ return cp.processContainerPut(ctx, put)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
+ cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
-func (cp *Processor) handleDelete(ev event.Event) {
+func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
del := ev.(containerEvent.Delete)
- cp.log.Info(logs.Notification,
+ cp.log.Info(ctx, logs.Notification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
- return cp.processContainerDelete(del)
+ return cp.processContainerDelete(ctx, del)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
+ cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
-
-func (cp *Processor) handleSetEACL(_ event.Event) {
- cp.log.Warn(logs.SkipDeprecatedNotification, zap.String("type", "set EACL"))
-}
diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go
index dc1e919bb..1b3842eb0 100644
--- a/pkg/innerring/processors/container/handlers_test.go
+++ b/pkg/innerring/processors/container/handlers_test.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/ecdsa"
"encoding/hex"
"testing"
@@ -71,7 +72,7 @@ func TestPutEvent(t *testing.T) {
nr: nr,
}
- proc.handlePut(event)
+ proc.handlePut(context.Background(), event)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -143,7 +144,7 @@ func TestDeleteEvent(t *testing.T) {
Signature: signature,
}
- proc.handleDelete(ev)
+ proc.handleDelete(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -160,7 +161,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@@ -169,11 +170,11 @@ type testNetworkState struct {
epoch uint64
}
-func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) {
+func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) {
return s.homHashDisabled, nil
}
-func (s *testNetworkState) Epoch() (uint64, error) {
+func (s *testNetworkState) Epoch(context.Context) (uint64, error) {
return s.epoch, nil
}
@@ -186,7 +187,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 {
return c.contractAddress
}
-func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) {
+func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
key := hex.EncodeToString(cid)
if cont, found := c.get[key]; found {
return cont, nil
@@ -236,6 +237,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction)
type testFrostFSIDClient struct{}
-func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
+func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
return &frostfsidclient.Subject{}, nil
}
diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go
index d89b63e82..8e4ab2623 100644
--- a/pkg/innerring/processors/container/process_container.go
+++ b/pkg/innerring/processors/container/process_container.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"errors"
"fmt"
"strings"
@@ -36,28 +37,28 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam
// Process a new container from the user by checking container sanity
// and sending an approval tx back to the morph.
-func (cp *Processor) processContainerPut(put putEvent) bool {
- if !cp.alphabetState.IsAlphabet() {
- cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
+func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
+ if !cp.alphabetState.IsAlphabet(ctx) {
+ cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
return true
}
- ctx := &putContainerContext{
+ pctx := &putContainerContext{
e: put,
}
- err := cp.checkPutContainer(ctx)
+ err := cp.checkPutContainer(ctx, pctx)
if err != nil {
- cp.log.Error(logs.ContainerPutContainerCheckFailed,
- zap.String("error", err.Error()),
+ cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
+ zap.Error(err),
)
return false
}
- if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
- cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
- zap.String("error", err.Error()),
+ if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
+ cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
+ zap.Error(err),
)
return false
}
@@ -65,8 +66,8 @@ func (cp *Processor) processContainerPut(put putEvent) bool {
return true
}
-func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
- binCnr := ctx.e.Container()
+func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error {
+ binCnr := pctx.e.Container()
var cnr containerSDK.Container
err := cnr.Unmarshal(binCnr)
@@ -74,12 +75,12 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
return fmt.Errorf("invalid binary container: %w", err)
}
- err = cp.verifySignature(signatureVerificationData{
+ err = cp.verifySignature(ctx, signatureVerificationData{
ownerContainer: cnr.Owner(),
verb: session.VerbContainerPut,
- binTokenSession: ctx.e.SessionToken(),
- binPublicKey: ctx.e.PublicKey(),
- signature: ctx.e.Signature(),
+ binTokenSession: pctx.e.SessionToken(),
+ binPublicKey: pctx.e.PublicKey(),
+ signature: pctx.e.Signature(),
signedData: binCnr,
})
if err != nil {
@@ -87,13 +88,13 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
}
// check homomorphic hashing setting
- err = checkHomomorphicHashing(cp.netState, cnr)
+ err = checkHomomorphicHashing(ctx, cp.netState, cnr)
if err != nil {
return fmt.Errorf("incorrect homomorphic hashing setting: %w", err)
}
// check native name and zone
- err = cp.checkNNS(ctx, cnr)
+ err = cp.checkNNS(ctx, pctx, cnr)
if err != nil {
return fmt.Errorf("NNS: %w", err)
}
@@ -103,24 +104,24 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
// Process a delete container operation from the user by checking container sanity
// and sending an approval tx back to the morph.
-func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
- if !cp.alphabetState.IsAlphabet() {
- cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
+func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
+ if !cp.alphabetState.IsAlphabet(ctx) {
+ cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
return true
}
- err := cp.checkDeleteContainer(e)
+ err := cp.checkDeleteContainer(ctx, e)
if err != nil {
- cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
- zap.String("error", err.Error()),
+ cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
+ zap.Error(err),
)
return false
}
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
- cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
- zap.String("error", err.Error()),
+ cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
+ zap.Error(err),
)
return false
@@ -129,7 +130,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
return true
}
-func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error {
+func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error {
binCnr := e.ContainerID()
var idCnr cid.ID
@@ -140,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error {
}
// receive owner of the related container
- cnr, err := cp.cnrClient.Get(binCnr)
+ cnr, err := cp.cnrClient.Get(ctx, binCnr)
if err != nil {
return fmt.Errorf("could not receive the container: %w", err)
}
- err = cp.verifySignature(signatureVerificationData{
+ err = cp.verifySignature(ctx, signatureVerificationData{
ownerContainer: cnr.Value.Owner(),
verb: session.VerbContainerDelete,
idContainerSet: true,
@@ -162,21 +163,21 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error {
return nil
}
-func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error {
+func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error {
// fetch domain info
- ctx.d = containerSDK.ReadDomain(cnr)
+ pctx.d = containerSDK.ReadDomain(cnr)
// if PutNamed event => check if values in container correspond to args
- if named, ok := ctx.e.(interface {
+ if named, ok := pctx.e.(interface {
Name() string
Zone() string
}); ok {
- if name := named.Name(); name != ctx.d.Name() {
- return fmt.Errorf("names differ %s/%s", name, ctx.d.Name())
+ if name := named.Name(); name != pctx.d.Name() {
+ return fmt.Errorf("names differ %s/%s", name, pctx.d.Name())
}
- if zone := named.Zone(); zone != ctx.d.Zone() {
- return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone())
+ if zone := named.Zone(); zone != pctx.d.Zone() {
+ return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone())
}
}
@@ -185,12 +186,12 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain
return fmt.Errorf("could not get container owner address: %w", err)
}
- subject, err := cp.frostFSIDClient.GetSubject(addr)
+ subject, err := cp.frostFSIDClient.GetSubject(ctx, addr)
if err != nil {
return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
}
- namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns")
+ namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns")
if !hasNamespace {
return nil
}
@@ -202,13 +203,13 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain
return nil
}
-func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error {
- netSetting, err := ns.HomomorphicHashDisabled()
+func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error {
+ netSetting, err := ns.HomomorphicHashDisabled(ctx)
if err != nil {
return fmt.Errorf("could not get setting in contract: %w", err)
}
- if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting {
+ if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting {
return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting)
}
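Note the semantic change in the condition above: the old check rejected any mismatch between the network and container homomorphic-hashing settings, while the new one rejects only a container that keeps hashing enabled (cnrSetting false, i.e. not disabled) on a network where it is disabled; a container that disables hashing on a hashing-enabled network is now accepted. A small sketch of the relaxed rule and its only failing case (illustrative helper, not from the codebase):

// homomorphicCompatible reports whether a container's homomorphic-hashing
// setting is acceptable under the network-wide setting.
func homomorphicCompatible(netDisabled, cnrDisabled bool) bool {
	return !(netDisabled && !cnrDisabled)
}

// homomorphicCompatible(true, false)  == false // container opts in, network forbids
// homomorphicCompatible(false, true)  == true  // container opts out: now allowed
// homomorphicCompatible(true, true)   == true
// homomorphicCompatible(false, false) == true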
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index 8fd9edfb8..9be93baa4 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -1,11 +1,11 @@
package container
import (
+ "context"
"errors"
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -15,18 +15,17 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
ContClient interface {
ContractAddress() util.Uint160
- Get(cid []byte) (*containercore.Container, error)
+ Get(ctx context.Context, cid []byte) (*containercore.Container, error)
}
MorphClient interface {
@@ -34,7 +33,7 @@ type (
}
FrostFSIDClient interface {
- GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error)
+ GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error)
}
// Processor of events produced by container contract in the sidechain.
@@ -69,7 +68,7 @@ type NetworkState interface {
//
// Must return any error encountered
// which did not allow reading the value.
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
// HomomorphicHashDisabled must return boolean that
// represents homomorphic network state:
@@ -77,7 +76,7 @@ type NetworkState interface {
// * false if hashing is enabled.
//
// which did not allow reading the value.
- HomomorphicHashDisabled() (bool, error)
+ HomomorphicHashDisabled(ctx context.Context) (bool, error)
}
// New creates a container contract processor instance.
@@ -97,8 +96,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: FrostFSID client is not set")
}
- p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)
@@ -121,11 +118,6 @@ func New(p *Params) (*Processor, error) {
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- return nil
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil
@@ -157,11 +149,6 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
p.SetParser(containerEvent.ParseDeleteNotary)
pp = append(pp, p)
- // set EACL
- p.SetRequestType(containerEvent.SetEACLNotaryEvent)
- p.SetParser(containerEvent.ParseSetEACLNotary)
- pp = append(pp, p)
-
return pp
}
@@ -190,10 +177,5 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
h.SetHandler(cp.handleDelete)
hh = append(hh, h)
- // set eACL
- h.SetRequestType(containerEvent.SetEACLNotaryEvent)
- h.SetHandler(cp.handleSetEACL)
- hh = append(hh, h)
-
return hh
}
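Across this file and its tests the migration is mechanical: every dependency interface gains a leading context.Context parameter, and test doubles accept the argument for interface parity while ignoring it. A minimal sketch of the pattern, with illustrative names rather than ones from the codebase:

package container

import "context"

// EpochSource is an illustrative stand-in for interfaces like NetworkState.
type EpochSource interface {
	Epoch(ctx context.Context) (uint64, error)
}

type fakeEpochSource struct{ epoch uint64 }

// Epoch takes ctx only to satisfy the widened interface; the fake ignores it.
func (f *fakeEpochSource) Epoch(context.Context) (uint64, error) {
	return f.epoch, nil
}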
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
index c80f9fdc5..936de2e77 100644
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ b/pkg/innerring/processors/frostfs/handlers.go
@@ -2,6 +2,7 @@ package frostfs
import (
"bytes"
+ "context"
"encoding/hex"
"slices"
@@ -12,67 +13,67 @@ import (
"go.uber.org/zap"
)
-func (np *Processor) handleDeposit(ev event.Event) {
+func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
depositIDBin := bytes.Clone(deposit.ID())
slices.Reverse(depositIDBin)
- np.log.Info(logs.Notification,
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "deposit"),
zap.String("id", hex.EncodeToString(depositIDBin)))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
- return np.processDeposit(deposit)
+ return np.processDeposit(ctx, deposit)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleWithdraw(ev event.Event) {
+func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
withdrawBin := bytes.Clone(withdraw.ID())
slices.Reverse(withdrawBin)
- np.log.Info(logs.Notification,
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "withdraw"),
zap.String("id", hex.EncodeToString(withdrawBin)))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
- return np.processWithdraw(withdraw)
+ return np.processWithdraw(ctx, withdraw)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCheque(ev event.Event) {
+func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
- np.log.Info(logs.Notification,
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
- return np.processCheque(cheque)
+ return np.processCheque(ctx, cheque)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleConfig(ev event.Event) {
+func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
cfg := ev.(frostfsEvent.Config)
- np.log.Info(logs.Notification,
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -80,11 +81,11 @@ func (np *Processor) handleConfig(ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
- return np.processConfig(cfg)
+ return np.processConfig(ctx, cfg)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
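Since the ants worker pool only executes zero-argument tasks, the handlers above thread ctx into processing by closure capture instead of widening the pool API. A stand-alone sketch of that pattern using the same ants library (the task body and names are illustrative):

package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

func process(ctx context.Context, id string) bool {
	fmt.Println("processing", id, "cancelled:", ctx.Err() != nil)
	return true
}

func main() {
	pool, _ := ants.NewPool(4, ants.WithNonblocking(true))
	defer pool.Release()

	ctx := context.Background()
	var wg sync.WaitGroup
	wg.Add(1)
	_ = pool.Submit(func() {
		defer wg.Done()
		process(ctx, "deposit-123") // ctx enters the task via the closure
	})
	wg.Wait()
}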
diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go
index 6425172bd..72310f6f9 100644
--- a/pkg/innerring/processors/frostfs/handlers_test.go
+++ b/pkg/innerring/processors/frostfs/handlers_test.go
@@ -1,6 +1,7 @@
package frostfs
import (
+ "context"
"testing"
"time"
@@ -36,7 +37,7 @@ func TestHandleDeposit(t *testing.T) {
AmountValue: 1000,
}
- proc.handleDeposit(ev)
+ proc.handleDeposit(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -57,7 +58,7 @@ func TestHandleDeposit(t *testing.T) {
es.epochCounter = 109
- proc.handleDeposit(ev)
+ proc.handleDeposit(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -98,7 +99,7 @@ func TestHandleWithdraw(t *testing.T) {
AmountValue: 1000,
}
- proc.handleWithdraw(ev)
+ proc.handleWithdraw(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -139,7 +140,7 @@ func TestHandleCheque(t *testing.T) {
LockValue: util.Uint160{200},
}
- proc.handleCheque(ev)
+ proc.handleCheque(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -176,7 +177,7 @@ func TestHandleConfig(t *testing.T) {
TxHashValue: util.Uint256{100},
}
- proc.handleConfig(ev)
+ proc.handleConfig(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -225,7 +226,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@@ -241,17 +242,17 @@ type testBalaceClient struct {
burn []balance.BurnPrm
}
-func (c *testBalaceClient) Mint(p balance.MintPrm) error {
+func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error {
c.mint = append(c.mint, p)
return nil
}
-func (c *testBalaceClient) Lock(p balance.LockPrm) error {
+func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error {
c.lock = append(c.lock, p)
return nil
}
-func (c *testBalaceClient) Burn(p balance.BurnPrm) error {
+func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error {
c.burn = append(c.burn, p)
return nil
}
@@ -260,7 +261,7 @@ type testNetmapClient struct {
config []nmClient.SetConfigPrm
}
-func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error {
+func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
c.config = append(c.config, p)
return nil
}
diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go
index c72aeceee..d10eb9660 100644
--- a/pkg/innerring/processors/frostfs/process_assets.go
+++ b/pkg/innerring/processors/frostfs/process_assets.go
@@ -1,6 +1,8 @@
package frostfs
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -15,9 +17,9 @@ const (
// Process deposit event by invoking a balance contract and sending native
// gas in the sidechain.
-func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
+func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit)
return true
}
@@ -28,9 +30,9 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
prm.SetID(deposit.ID())
// send transferX to a balance contract
- err := np.balanceClient.Mint(prm)
+ err := np.balanceClient.Mint(ctx, prm)
if err != nil {
- np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@@ -44,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
- np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
+ np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined,
zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
@@ -56,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
- np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
return false
}
if balance < np.gasBalanceThreshold {
- np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
+ np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
@@ -70,8 +72,8 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
- np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
- zap.String("error", err.Error()))
+ np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
+ zap.Error(err))
return false
}
@@ -82,16 +84,16 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
}
// Process withdraw event by locking assets in the balance account.
-func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
+func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw)
return true
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
- np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err))
return false
}
@@ -105,9 +107,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount()))
prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime))
- err = np.balanceClient.Lock(prm)
+ err = np.balanceClient.Lock(ctx, prm)
if err != nil {
- np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
return false
}
@@ -116,9 +118,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
// Process cheque event by transferring assets from the lock account back to
// the reserve account.
-func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
+func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque)
return true
}
@@ -128,9 +130,9 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount()))
prm.SetID(cheque.ID())
- err := np.balanceClient.Burn(prm)
+ err := np.balanceClient.Burn(ctx, prm)
if err != nil {
- np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go
index 2ae3e6ced..dc579f6bb 100644
--- a/pkg/innerring/processors/frostfs/process_config.go
+++ b/pkg/innerring/processors/frostfs/process_config.go
@@ -1,6 +1,8 @@
package frostfs
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -9,9 +11,9 @@ import (
// Process config event by setting configuration value from the mainchain in
// the sidechain.
-func (np *Processor) processConfig(config frostfsEvent.Config) bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
+func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig)
return true
}
@@ -22,9 +24,9 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool {
prm.SetValue(config.Value())
prm.SetHash(config.TxHash())
- err := np.netmapClient.SetConfig(prm)
+ err := np.netmapClient.SetConfig(ctx, prm)
if err != nil {
- np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go
index 2019857ac..9d3bf65cd 100644
--- a/pkg/innerring/processors/frostfs/processor.go
+++ b/pkg/innerring/processors/frostfs/processor.go
@@ -1,11 +1,11 @@
package frostfs
import (
+ "context"
"errors"
"fmt"
"sync"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -16,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
@@ -27,7 +26,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@@ -36,13 +35,13 @@ type (
}
BalanceClient interface {
- Mint(p balance.MintPrm) error
- Lock(p balance.LockPrm) error
- Burn(p balance.BurnPrm) error
+ Mint(ctx context.Context, p balance.MintPrm) error
+ Lock(ctx context.Context, p balance.LockPrm) error
+ Burn(ctx context.Context, p balance.BurnPrm) error
}
NetmapClient interface {
- SetConfig(p nmClient.SetConfigPrm) error
+ SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error
}
MorphClient interface {
@@ -110,8 +109,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
- p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
@@ -145,70 +142,34 @@ func New(p *Params) (*Processor, error) {
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- var (
- parsers = make([]event.NotificationParserInfo, 0, 6)
-
- p event.NotificationParserInfo
- )
-
- p.SetScriptHash(np.frostfsContract)
-
- // deposit event
- p.SetType(event.TypeFromString(depositNotification))
- p.SetParser(frostfsEvent.ParseDeposit)
- parsers = append(parsers, p)
-
- // withdraw event
- p.SetType(event.TypeFromString(withdrawNotification))
- p.SetParser(frostfsEvent.ParseWithdraw)
- parsers = append(parsers, p)
-
- // cheque event
- p.SetType(event.TypeFromString(chequeNotification))
- p.SetParser(frostfsEvent.ParseCheque)
- parsers = append(parsers, p)
-
- // config event
- p.SetType(event.TypeFromString(configNotification))
- p.SetParser(frostfsEvent.ParseConfig)
- parsers = append(parsers, p)
-
- return parsers
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- var (
- handlers = make([]event.NotificationHandlerInfo, 0, 6)
-
- h event.NotificationHandlerInfo
- )
-
- h.SetScriptHash(np.frostfsContract)
-
- // deposit handler
- h.SetType(event.TypeFromString(depositNotification))
- h.SetHandler(np.handleDeposit)
- handlers = append(handlers, h)
-
- // withdraw handler
- h.SetType(event.TypeFromString(withdrawNotification))
- h.SetHandler(np.handleWithdraw)
- handlers = append(handlers, h)
-
- // cheque handler
- h.SetType(event.TypeFromString(chequeNotification))
- h.SetHandler(np.handleCheque)
- handlers = append(handlers, h)
-
- // config handler
- h.SetType(event.TypeFromString(configNotification))
- h.SetHandler(np.handleConfig)
- handlers = append(handlers, h)
-
- return handlers
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(depositNotification),
+ Parser: frostfsEvent.ParseDeposit,
+ Handlers: []event.Handler{np.handleDeposit},
+ },
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(withdrawNotification),
+ Parser: frostfsEvent.ParseWithdraw,
+ Handlers: []event.Handler{np.handleWithdraw},
+ },
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(chequeNotification),
+ Parser: frostfsEvent.ParseCheque,
+ Handlers: []event.Handler{np.handleCheque},
+ },
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(configNotification),
+ Parser: frostfsEvent.ParseConfig,
+ Handlers: []event.Handler{np.handleConfig},
+ },
+ }
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go
index fd7f539c3..7e8ab629d 100644
--- a/pkg/innerring/processors/governance/handlers.go
+++ b/pkg/innerring/processors/governance/handlers.go
@@ -1,6 +1,8 @@
package governance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -11,7 +13,7 @@ import (
"go.uber.org/zap"
)
-func (gp *Processor) HandleAlphabetSync(e event.Event) {
+func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
var (
typ string
hash util.Uint256
@@ -32,16 +34,16 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
return
}
- gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
+ gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ))
// send event to the worker pool
err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool {
- return gp.processAlphabetSync(hash)
+ return gp.processAlphabetSync(ctx, hash)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
+ gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained,
zap.Int("capacity", gp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
index b73e24318..864c5da67 100644
--- a/pkg/innerring/processors/governance/handlers_test.go
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -1,6 +1,7 @@
package governance
import (
+ "context"
"encoding/binary"
"sort"
"testing"
@@ -8,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -38,7 +38,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
- nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -50,7 +49,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
- NetmapClient: nm,
},
)
@@ -60,7 +58,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
txHash: util.Uint256{100},
}
- proc.HandleAlphabetSync(ev)
+ proc.HandleAlphabetSync(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -73,10 +71,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
- var irUpdateExp []nmClient.UpdateIRPrm
-
- require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates")
-
var expAlphabetUpdate client.UpdateAlphabetListPrm
expAlphabetUpdate.SetHash(ev.txHash)
expAlphabetUpdate.SetList(testKeys.newInnerRingExp)
@@ -119,7 +113,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
- nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -131,7 +124,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
- NetmapClient: nm,
},
)
@@ -142,7 +134,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
Role: noderoles.NeoFSAlphabet,
}
- proc.HandleAlphabetSync(ev)
+ proc.HandleAlphabetSync(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -155,9 +147,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
- var irUpdatesExp []nmClient.UpdateIRPrm
- require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates")
-
var alpabetUpdExp client.UpdateAlphabetListPrm
alpabetUpdExp.SetList(testKeys.newInnerRingExp)
alpabetUpdExp.SetHash(ev.TxHash)
@@ -230,7 +219,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@@ -238,7 +227,7 @@ type testVoter struct {
votes []VoteValidatorPrm
}
-func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error {
+func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error {
v.votes = append(v.votes, prm)
return nil
}
@@ -247,7 +236,7 @@ type testIRFetcher struct {
publicKeys keys.PublicKeys
}
-func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
return f.publicKeys, nil
}
@@ -262,12 +251,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) {
return c.commiteeKeys, nil
}
-func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error {
+func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error {
c.alphabetUpdates = append(c.alphabetUpdates, prm)
return nil
}
-func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error {
+func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error {
c.notaryUpdates = append(c.notaryUpdates, prm)
return nil
}
@@ -277,7 +266,7 @@ type testMainnetClient struct {
designateHash util.Uint160
}
-func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
+func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) {
return c.alphabetKeys, nil
}
@@ -289,16 +278,7 @@ type testFrostFSClient struct {
updates []frostfscontract.AlphabetUpdatePrm
}
-func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error {
- c.updates = append(c.updates, p)
- return nil
-}
-
-type testNetmapClient struct {
- updates []nmClient.UpdateIRPrm
-}
-
-func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error {
+func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error {
c.updates = append(c.updates, p)
return nil
}
diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go
index 50ba58e77..6e22abb3c 100644
--- a/pkg/innerring/processors/governance/process_update.go
+++ b/pkg/innerring/processors/governance/process_update.go
@@ -1,6 +1,7 @@
package governance
import (
+ "context"
"encoding/binary"
"encoding/hex"
"sort"
@@ -18,39 +19,39 @@ const (
alphabetUpdateIDPrefix = "AlphabetUpdate"
)
-func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
- if !gp.alphabetState.IsAlphabet() {
- gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
+func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool {
+ if !gp.alphabetState.IsAlphabet(ctx) {
+ gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
return true
}
- mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
+ mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx)
if err != nil {
- gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
+ zap.Error(err))
return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
- gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
+ zap.Error(err))
return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
- gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
+ zap.Error(err))
return false
}
if newAlphabet == nil {
- gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
+ gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
return true
}
- gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
+ gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)
@@ -61,22 +62,22 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
}
// 1. Vote to sidechain committee via alphabet contracts.
- err = gp.voter.VoteForSidechainValidator(votePrm)
+ err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
if err != nil {
- gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
+ zap.Error(err))
}
// 2. Update NeoFSAlphabet role in the sidechain.
- gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash)
+ gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash)
// 3. Update notary role in the sidechain.
- gp.updateNotaryRoleInSidechain(newAlphabet, txHash)
+ gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash)
// 4. Update FrostFS contract in the mainnet.
- gp.updateFrostFSContractInMainnet(newAlphabet)
+ gp.updateFrostFSContractInMainnet(ctx, newAlphabet)
- gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
+ gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate)
return true
}
@@ -93,24 +94,24 @@ func prettyKeys(keys keys.PublicKeys) string {
return strings.TrimRight(sb.String(), delimiter)
}
-func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
- innerRing, err := gp.irFetcher.InnerRingKeys()
+func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+ innerRing, err := gp.irFetcher.InnerRingKeys(ctx)
if err != nil {
- gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
+ zap.Error(err))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
- gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
+ zap.Error(err))
return
}
sort.Sort(newInnerRing)
- gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
+ gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)
@@ -119,26 +120,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
updPrm.SetList(newInnerRing)
updPrm.SetHash(txHash)
- if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
- gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
- zap.String("error", err.Error()))
+ if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
+ gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
+ zap.Error(err))
}
}
-func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) {
+func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) {
updPrm := client.UpdateNotaryListPrm{}
updPrm.SetList(newAlphabet)
updPrm.SetHash(txHash)
- err := gp.morphClient.UpdateNotaryList(updPrm)
+ err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
if err != nil {
- gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
+ zap.Error(err))
}
}
-func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) {
+func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) {
epoch := gp.epochState.EpochCounter()
buf := make([]byte, 8)
@@ -151,9 +152,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)
prm.SetID(id)
prm.SetPubs(newAlphabet)
- err := gp.frostfsClient.AlphabetUpdate(prm)
+ err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
if err != nil {
- gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
+ zap.Error(err))
}
}
diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go
index fa267eade..2d131edda 100644
--- a/pkg/innerring/processors/governance/processor.go
+++ b/pkg/innerring/processors/governance/processor.go
@@ -1,6 +1,7 @@
package governance
import (
+ "context"
"errors"
"fmt"
@@ -25,7 +26,7 @@ const ProcessorPoolSize = 1
type (
// AlphabetState is a callback interface for innerring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
)
@@ -38,7 +39,7 @@ type VoteValidatorPrm struct {
// Voter is a callback interface for alphabet contract voting.
type Voter interface {
- VoteForSidechainValidator(VoteValidatorPrm) error
+ VoteForSidechainValidator(context.Context, VoteValidatorPrm) error
}
type (
@@ -51,11 +52,11 @@ type (
// Implementation must take into account availability of
// the notary contract.
IRFetcher interface {
- InnerRingKeys() (keys.PublicKeys, error)
+ InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
}
FrostFSClient interface {
- AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error
+ AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error
}
NetmapClient interface {
@@ -63,14 +64,14 @@ type (
}
MainnetClient interface {
- NeoFSAlphabetList() (res keys.PublicKeys, err error)
+ NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error)
GetDesignateHash() util.Uint160
}
MorphClient interface {
Committee() (res keys.PublicKeys, err error)
- UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error
- UpdateNotaryList(prm client.UpdateNotaryListPrm) error
+ UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error
+ UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error
}
// Processor of events related to governance in the network.
@@ -79,7 +80,6 @@ type (
metrics metrics.Register
pool *ants.Pool
frostfsClient FrostFSClient
- netmapClient NetmapClient
alphabetState AlphabetState
epochState EpochState
@@ -105,7 +105,6 @@ type (
MorphClient MorphClient
MainnetClient MainnetClient
FrostFSClient FrostFSClient
- NetmapClient NetmapClient
}
)
@@ -146,7 +145,6 @@ func New(p *Params) (*Processor, error) {
metrics: metricsRegister,
pool: pool,
frostfsClient: p.FrostFSClient,
- netmapClient: p.NetmapClient,
alphabetState: p.AlphabetState,
epochState: p.EpochState,
voter: p.Voter,
@@ -157,22 +155,16 @@ func New(p *Params) (*Processor, error) {
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- var pi event.NotificationParserInfo
- pi.SetScriptHash(gp.designate)
- pi.SetType(event.TypeFromString(native.DesignationEventName))
- pi.SetParser(rolemanagement.ParseDesignate)
- return []event.NotificationParserInfo{pi}
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- var hi event.NotificationHandlerInfo
- hi.SetScriptHash(gp.designate)
- hi.SetType(event.TypeFromString(native.DesignationEventName))
- hi.SetHandler(gp.HandleAlphabetSync)
- return []event.NotificationHandlerInfo{hi}
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: gp.designate,
+ Type: event.TypeFromString(native.DesignationEventName),
+ Parser: rolemanagement.ParseDesignate,
+ Handlers: []event.Handler{gp.HandleAlphabetSync},
+ },
+ }
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go
index c18611569..abd5b089a 100644
--- a/pkg/innerring/processors/netmap/cleanup_table.go
+++ b/pkg/innerring/processors/netmap/cleanup_table.go
@@ -60,7 +60,7 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) {
}
access.binNodeInfo = binNodeInfo
- access.maintenance = nmNodes[i].IsMaintenance()
+ access.maintenance = nmNodes[i].Status().IsMaintenance()
newMap[keyString] = access
}
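This change and the test update below track an SDK move from per-state predicates and setters to a status accessor. The new call shapes are taken from the diff itself; the helper names and package clause here are illustrative:

package sketch

import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"

// before: info.SetMaintenance() / info.IsMaintenance()
// after:  info.SetStatus(netmap.Maintenance) / info.Status().IsMaintenance()

func isUnderMaintenance(info netmap.NodeInfo) bool {
	return info.Status().IsMaintenance()
}

func markMaintenance(info *netmap.NodeInfo) {
	info.SetStatus(netmap.Maintenance)
}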
diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go
index ae5620733..208bd5496 100644
--- a/pkg/innerring/processors/netmap/cleanup_table_test.go
+++ b/pkg/innerring/processors/netmap/cleanup_table_test.go
@@ -127,7 +127,7 @@ func TestCleanupTable(t *testing.T) {
t.Run("skip maintenance nodes", func(t *testing.T) {
cnt := 0
- infos[1].SetMaintenance()
+ infos[1].SetStatus(netmap.Maintenance)
key := netmap.StringifyPublicKey(infos[1])
c.update(networkMap, 5)
diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go
index c6053e281..4c7199a49 100644
--- a/pkg/innerring/processors/netmap/handlers.go
+++ b/pkg/innerring/processors/netmap/handlers.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -11,93 +12,93 @@ import (
"go.uber.org/zap"
)
-func (np *Processor) HandleNewEpochTick(ev event.Event) {
+func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
- np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
+ np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
// send an event to the worker pool
- err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleNewEpoch(ev event.Event) {
+func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
- np.log.Info(logs.Notification,
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
- return np.processNewEpoch(epochEvent)
+ return np.processNewEpoch(ctx, epochEvent)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleAddPeer(ev event.Event) {
+func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
- np.log.Info(logs.Notification,
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "add peer"),
)
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
- return np.processAddPeer(newPeer)
+ return np.processAddPeer(ctx, newPeer)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleUpdateState(ev event.Event) {
+func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
- np.log.Info(logs.Notification,
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
- return np.processUpdatePeer(updPeer)
+ return np.processUpdatePeer(ctx, updPeer)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCleanupTick(ev event.Event) {
+func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
if !np.netmapSnapshot.enabled {
- np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
+ np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)
return
}
cleanup := ev.(netmapCleanupTick)
- np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
+ np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
- return np.processNetmapCleanupTick(cleanup)
+ return np.processNetmapCleanupTick(ctx, cleanup)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
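One subtlety in HandleNewEpochTick above: processNewEpochTick previously had type func() bool, so its method value could be passed to SubmitEvent directly; once it gains a ctx parameter, a closure adapter is required. Illustrative stand-ins for the involved pieces:

package netmap

import "context"

type ticker struct{}

func (t *ticker) tick(ctx context.Context) bool { return ctx.Err() == nil }

func submit(task func() bool) { _ = task() }

func run(ctx context.Context, t *ticker) {
	// t.tick no longer satisfies func() bool, so wrap it:
	submit(func() bool { return t.tick(ctx) })
}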
diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go
index b34abb78c..934c3790d 100644
--- a/pkg/innerring/processors/netmap/handlers_test.go
+++ b/pkg/innerring/processors/netmap/handlers_test.go
@@ -1,19 +1,19 @@
package netmap
import (
+ "context"
"fmt"
"testing"
"time"
- v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -39,7 +39,7 @@ func TestNewEpochTick(t *testing.T) {
require.NoError(t, err, "failed to create processor")
ev := timerEvent.NewEpochTick{}
- proc.HandleNewEpochTick(ev)
+ proc.HandleNewEpochTick(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -68,7 +68,6 @@ func TestNewEpoch(t *testing.T) {
duration: 10,
}
r := &testEpochResetter{}
- cc := &testContainerClient{}
nc := &testNetmapClient{
epochDuration: 20,
txHeights: map[util.Uint256]uint32{
@@ -82,7 +81,6 @@ func TestNewEpoch(t *testing.T) {
p.NotaryDepositHandler = eh.Handle
p.AlphabetSyncHandler = eh.Handle
p.NetmapClient = nc
- p.ContainerWrapper = cc
p.EpochTimer = r
p.EpochState = es
})
@@ -93,7 +91,7 @@ func TestNewEpoch(t *testing.T) {
Num: 101,
Hash: util.Uint256{101},
}
- proc.handleNewEpoch(ev)
+ proc.handleNewEpoch(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -103,11 +101,6 @@ func TestNewEpoch(t *testing.T) {
require.Equal(t, ev.Num, es.counter, "invalid epoch counter")
require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets")
- var expEstimation cntClient.StartEstimationPrm
- expEstimation.SetEpoch(ev.Num - 1)
- expEstimation.SetHash(ev.Hash)
- require.EqualValues(t, []cntClient.StartEstimationPrm{expEstimation}, cc.estimations, "invalid estimations")
-
require.EqualValues(t, []event.Event{
governance.NewSyncEvent(ev.TxHash()),
ev,
@@ -138,7 +131,7 @@ func TestAddPeer(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleAddPeer(ev)
+ proc.handleAddPeer(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -146,14 +139,14 @@ func TestAddPeer(t *testing.T) {
require.Nil(t, nc.notaryInvokes, "invalid notary invokes")
- node.SetOnline()
+ node.SetStatus(netmap.Online)
ev = netmapEvent.AddPeer{
NodeBytes: node.Marshal(),
Request: &payload.P2PNotaryRequest{
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleAddPeer(ev)
+ proc.handleAddPeer(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -196,7 +189,7 @@ func TestUpdateState(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleUpdateState(ev)
+ proc.handleUpdateState(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -240,7 +233,7 @@ func TestCleanupTick(t *testing.T) {
txHash: util.Uint256{123},
}
- proc.handleCleanupTick(ev)
+ proc.handleCleanupTick(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -274,7 +267,6 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
as := &testAlphabetState{
isAlphabet: true,
}
- cc := &testContainerClient{}
nc := &testNetmapClient{}
eh := &testEventHandler{}
@@ -288,7 +280,6 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
EpochState: es,
EpochTimer: r,
AlphabetState: as,
- ContainerWrapper: cc,
NetmapClient: nc,
NotaryDepositHandler: eh.Handle,
AlphabetSyncHandler: eh.Handle,
@@ -303,7 +294,7 @@ type testNodeStateSettings struct {
maintAllowed bool
}
-func (s *testNodeStateSettings) MaintenanceModeAllowed() error {
+func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {
if s.maintAllowed {
return nil
}
@@ -312,7 +303,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed() error {
type testValidator struct{}
-func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error {
+func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error {
return nil
}
@@ -350,19 +341,10 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
-type testContainerClient struct {
- estimations []cntClient.StartEstimationPrm
-}
-
-func (c *testContainerClient) StartEstimation(p cntClient.StartEstimationPrm) error {
- c.estimations = append(c.estimations, p)
- return nil
-}
-
type notaryInvoke struct {
contract util.Uint160
fee fixedn.Fixed8
@@ -383,7 +365,7 @@ type testNetmapClient struct {
invokedTxs []*transaction.Transaction
}
-func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{
contract: contract,
fee: fee,
@@ -399,7 +381,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 {
return c.contractAddress
}
-func (c *testNetmapClient) EpochDuration() (uint64, error) {
+func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) {
return c.epochDuration, nil
}
@@ -410,11 +392,11 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) {
return 0, fmt.Errorf("not found")
}
-func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
return c.netmap, nil
}
-func (c *testNetmapClient) NewEpoch(epoch uint64) error {
+func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error {
c.newEpochs = append(c.newEpochs, epoch)
return nil
}
@@ -432,6 +414,6 @@ type testEventHandler struct {
handledEvents []event.Event
}
-func (h *testEventHandler) Handle(e event.Event) {
+func (h *testEventHandler) Handle(_ context.Context, e event.Event) {
h.handledEvents = append(h.handledEvents, e)
}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
index 5e0558344..b81dc9989 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
@@ -1,6 +1,7 @@
package locode
import (
+ "context"
"errors"
"fmt"
@@ -29,7 +30,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record
// - Continent: R.Continent().String().
//
// UN-LOCODE attribute remains untouched.
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
attrLocode := n.LOCODE()
if attrLocode == "" {
return nil
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
index 8ab174dfd..fa2dd1ac1 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
@@ -1,6 +1,7 @@
package locode_test
import (
+ "context"
"errors"
"fmt"
"testing"
@@ -92,7 +93,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
t.Run("w/o locode", func(t *testing.T) {
n := nodeInfoWithSomeAttrs()
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.NoError(t, err)
})
@@ -102,7 +103,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttrValue(n, "WRONG LOCODE")
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.Error(t, err)
})
@@ -111,7 +112,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"})
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.Error(t, err)
})
@@ -119,7 +120,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, r.LOCODE)
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.NoError(t, err)
require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode"))
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
index 8f6667933..ba5db9205 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
@@ -8,38 +8,38 @@ import (
// Record is a read-only interface to a single
// FrostFS LOCODE database entry.
type Record interface {
- // Must return ISO 3166-1 alpha-2
+ // CountryCode must return ISO 3166-1 alpha-2
// country code.
//
// Must not return nil.
CountryCode() *locodedb.CountryCode
- // Must return English short country name
+ // CountryName must return English short country name
// officially used by the ISO 3166
// Maintenance Agency (ISO 3166/MA).
CountryName() string
- // Must return UN/LOCODE 3-character code
+ // LocationCode must return UN/LOCODE 3-character code
// for the location (numerals 2-9 may also
// be used).
//
// Must not return nil.
LocationCode() *locodedb.LocationCode
- // Must return name of the location which
+ // LocationName must return the name of the location which
// has been allocated a UN/LOCODE without
// diacritic sign.
LocationName() string
- // Must return ISO 1-3 character alphabetic
+ // SubDivCode must return ISO 1-3 character alphabetic
// and/or numeric code for the administrative
// division of the country concerned.
SubDivCode() string
- // Must return subdivision name.
+ // SubDivName must return subdivision name.
SubDivName() string
- // Must return existing continent where is
+ // Continent must return the continent containing
// the location.
//
// Must not return nil.
@@ -49,7 +49,7 @@ type Record interface {
// DB is a read-only interface to the
// FrostFS LOCODE database.
type DB interface {
- // Must find the record that corresponds to
+ // Get must find the record that corresponds to
// LOCODE and provides the Record interface.
//
// Must return an error if Record is nil.
diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
index 126f36582..0e4628ac7 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
@@ -1,6 +1,7 @@
package maddress
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -8,7 +9,7 @@ import (
)
// VerifyAndUpdate calls network.VerifyAddress.
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
err := network.VerifyMultiAddress(*n)
if err != nil {
return fmt.Errorf("could not verify multiaddress: %w", err)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
index 4094e50a5..03c41a451 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
@@ -7,6 +7,7 @@ map candidates.
package state
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -23,7 +24,7 @@ type NetworkSettings interface {
// no error if allowed;
// ErrMaintenanceModeDisallowed if disallowed;
// other error if there are any problems with the check.
- MaintenanceModeAllowed() error
+ MaintenanceModeAllowed(ctx context.Context) error
}
// NetMapCandidateValidator represents tool which checks state of nodes which
@@ -55,13 +56,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting
// MUST NOT be called before SetNetworkSettings.
//
// See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
-func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error {
- if node.IsOnline() {
+func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error {
+ if node.Status().IsOnline() {
return nil
}
- if node.IsMaintenance() {
- return x.netSettings.MaintenanceModeAllowed()
+ if node.Status().IsMaintenance() {
+ return x.netSettings.MaintenanceModeAllowed(ctx)
}
return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE")
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
index a557628f0..cbf48a710 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
@@ -1,6 +1,7 @@
package state_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -13,7 +14,7 @@ type testNetworkSettings struct {
disallowed bool
}
-func (x testNetworkSettings) MaintenanceModeAllowed() error {
+func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error {
if x.disallowed {
return state.ErrMaintenanceModeDisallowed
}
@@ -41,22 +42,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
},
{
name: "ONLINE",
- preparer: (*netmap.NodeInfo).SetOnline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) },
valid: true,
},
{
name: "OFFLINE",
- preparer: (*netmap.NodeInfo).SetOffline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) },
valid: false,
},
{
name: "MAINTENANCE/allowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: true,
},
{
name: "MAINTENANCE/disallowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: false,
validatorPreparer: func(v *state.NetMapCandidateValidator) {
var s testNetworkSettings
@@ -81,7 +82,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
testCase.validatorPreparer(&v)
}
- err := v.VerifyAndUpdate(&node)
+ err := v.VerifyAndUpdate(context.Background(), &node)
if testCase.valid {
require.NoError(t, err, testCase.name)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go
index e9b24e024..3dbe98a8d 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go
@@ -1,6 +1,8 @@
package nodevalidation
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -26,9 +28,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator {
// VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators.
//
// If error appears, returns it immediately.
-func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error {
+func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error {
for _, v := range c.validators {
- if err := v.VerifyAndUpdate(ni); err != nil {
+ if err := v.VerifyAndUpdate(ctx, ni); err != nil {
return err
}
}
diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go
index 170c39e2c..8f8cc17ff 100644
--- a/pkg/innerring/processors/netmap/process_cleanup.go
+++ b/pkg/innerring/processors/netmap/process_cleanup.go
@@ -1,15 +1,17 @@
package netmap
import (
- v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
-func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
+func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
return true
}
@@ -17,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
- np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
+ np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))
return nil
}
- np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
+ np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
// In notary environments we call the UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can invoke it in the same form.
@@ -31,6 +33,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
const methodUpdateStateNotary = "updateStateIR"
err = np.netmapClient.MorphNotaryInvoke(
+ ctx,
np.netmapClient.ContractAddress(),
0,
uint32(ev.epoch),
@@ -39,14 +42,14 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
int64(v2netmap.Offline), key.Bytes(),
)
if err != nil {
- np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}
return nil
})
if err != nil {
- np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
+ zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index 4dfa3997b..7c78d24a5 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -1,22 +1,23 @@
package netmap
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"go.uber.org/zap"
)
// Process a new epoch notification by setting the global epoch value and
// resetting the local epoch timer.
-func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
+func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
epoch := ev.EpochNumber()
- epochDuration, err := np.netmapClient.EpochDuration()
+ epochDuration, err := np.netmapClient.EpochDuration(ctx)
if err != nil {
- np.log.Warn(logs.NetmapCantGetEpochDuration,
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
+ zap.Error(err))
} else {
np.epochState.SetEpochDuration(epochDuration)
}
@@ -25,60 +26,46 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
if err != nil {
- np.log.Warn(logs.NetmapCantGetTransactionHeight,
+ np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
- np.log.Warn(logs.NetmapCantResetEpochTimer,
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
+ zap.Error(err))
}
// get new netmap snapshot
- networkMap, err := np.netmapClient.NetMap()
+ networkMap, err := np.netmapClient.NetMap(ctx)
if err != nil {
- np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
+ zap.Error(err))
return false
}
- prm := cntClient.StartEstimationPrm{}
-
- prm.SetEpoch(epoch - 1)
- prm.SetHash(ev.TxHash())
-
- if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch
- err = np.containerWrp.StartEstimation(prm)
- if err != nil {
- np.log.Warn(logs.NetmapCantStartContainerSizeEstimation,
- zap.Uint64("epoch", epoch),
- zap.String("error", err.Error()))
- }
- }
-
np.netmapSnapshot.update(*networkMap, epoch)
- np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
- np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash()))
- np.handleNotaryDeposit(ev)
+ np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
+ np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash()))
+ np.handleNotaryDeposit(ctx, ev)
return true
}
// Process new epoch tick by invoking new epoch method in network map contract.
-func (np *Processor) processNewEpochTick() bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
+func (np *Processor) processNewEpochTick(ctx context.Context) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
return true
}
nextEpoch := np.epochState.EpochCounter() + 1
- np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
+ np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
- err := np.netmapClient.NewEpoch(nextEpoch)
+ err := np.netmapClient.NewEpoch(ctx, nextEpoch)
if err != nil {
- np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index 9e6e8c283..b5c727cc7 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -12,9 +13,9 @@ import (
// Process an add peer notification by performing
// a sanity check of the new node.
-func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
+func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
return true
}
@@ -22,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
tx := ev.NotaryRequest().MainTransaction
ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
- np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
+ np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))
@@ -33,15 +34,15 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
// it would be nice to have the tx id in the event structure to log it
- np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
+ np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
return false
}
// validate and update node info
- err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
+ err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo)
if err != nil {
- np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
- zap.String("error", err.Error()),
+ np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
+ zap.Error(err),
)
return false
@@ -62,8 +63,8 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// But there is no guarantee that the code will be executed in the same order.
// That is why we need to perform `addPeerIR` only when the node is online,
// because within this method the contract sets the `ONLINE` state for the node.
- if updated && nodeInfo.IsOnline() {
- np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
+ if updated && nodeInfo.Status().IsOnline() {
+ np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@@ -76,6 +77,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// create new notary request with the original nonce
err = np.netmapClient.MorphNotaryInvoke(
+ ctx,
np.netmapClient.ContractAddress(),
0,
ev.NotaryRequest().MainTransaction.Nonce,
@@ -84,7 +86,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
nodeInfoBinary,
)
if err != nil {
- np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
return false
}
}
@@ -93,9 +95,9 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
}
// Process an update peer notification by sending an approval tx to the smart contract.
-func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
+func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
return true
}
@@ -106,9 +108,9 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
var err error
if ev.Maintenance() {
- err = np.nodeStateSettings.MaintenanceModeAllowed()
+ err = np.nodeStateSettings.MaintenanceModeAllowed(ctx)
if err != nil {
- np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
+ np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)
@@ -117,7 +119,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
}
if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
- np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index e8fb8721b..277bca1c3 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -1,13 +1,12 @@
package netmap
import (
+ "context"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -17,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
@@ -36,14 +34,14 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
// NodeValidator wraps basic method of checking the correctness
// of information about the node and its finalization for adding
// to the network map.
NodeValidator interface {
- // Must verify and optionally update NodeInfo structure.
+ // VerifyAndUpdate must verify and optionally update NodeInfo structure.
//
// Must return an error if NodeInfo input is invalid.
// Must return an error if it is not possible to correctly
@@ -51,24 +49,20 @@ type (
//
// If no error occurs, the parameter must point to the
// ready-made NodeInfo structure.
- VerifyAndUpdate(*netmap.NodeInfo) error
+ VerifyAndUpdate(context.Context, *netmap.NodeInfo) error
}
Client interface {
- MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
+ MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
ContractAddress() util.Uint160
- EpochDuration() (uint64, error)
+ EpochDuration(ctx context.Context) (uint64, error)
MorphTxHeight(h util.Uint256) (res uint32, err error)
- NetMap() (*netmap.NetMap, error)
- NewEpoch(epoch uint64) error
+ NetMap(ctx context.Context) (*netmap.NetMap, error)
+ NewEpoch(ctx context.Context, epoch uint64) error
MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
}
- ContainerClient interface {
- StartEstimation(p cntClient.StartEstimationPrm) error
- }
-
// Processor of events produced by the network map contract
// and by the new epoch ticker, which is tied to the contract.
Processor struct {
@@ -80,7 +74,6 @@ type (
alphabetState AlphabetState
netmapClient Client
- containerWrp ContainerClient
netmapSnapshot cleanupTable
@@ -103,7 +96,6 @@ type (
AlphabetState AlphabetState
CleanupEnabled bool
CleanupThreshold uint64 // in epochs
- ContainerWrapper ContainerClient
AlphabetSyncHandler event.Handler
NotaryDepositHandler event.Handler
@@ -133,16 +125,12 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: alphabet sync handler is not set")
case p.NotaryDepositHandler == nil:
return nil, errors.New("ir/netmap: notary deposit handler is not set")
- case p.ContainerWrapper == nil:
- return nil, errors.New("ir/netmap: container contract wrapper is not set")
case p.NodeValidator == nil:
return nil, errors.New("ir/netmap: node validator is not set")
case p.NodeStateSettings == nil:
return nil, errors.New("ir/netmap: node state settings is not set")
}
- p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)
@@ -161,7 +149,6 @@ func New(p *Params) (*Processor, error) {
epochState: p.EpochState,
alphabetState: p.AlphabetState,
netmapClient: p.NetmapClient,
- containerWrp: p.ContainerWrapper,
netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold),
handleAlphabetSync: p.AlphabetSyncHandler,
@@ -174,36 +161,16 @@ func New(p *Params) (*Processor, error) {
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- parsers := make([]event.NotificationParserInfo, 0, 3)
-
- var p event.NotificationParserInfo
-
- p.SetScriptHash(np.netmapClient.ContractAddress())
-
- // new epoch event
- p.SetType(newEpochNotification)
- p.SetParser(netmapEvent.ParseNewEpoch)
- parsers = append(parsers, p)
-
- return parsers
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- handlers := make([]event.NotificationHandlerInfo, 0, 3)
-
- var i event.NotificationHandlerInfo
-
- i.SetScriptHash(np.netmapClient.ContractAddress())
-
- // new epoch handler
- i.SetType(newEpochNotification)
- i.SetHandler(np.handleNewEpoch)
- handlers = append(handlers, i)
-
- return handlers
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: np.netmapClient.ContractAddress(),
+ Type: newEpochNotification,
+ Parser: netmapEvent.ParseNewEpoch,
+ Handlers: []event.Handler{np.handleNewEpoch},
+ },
+ }
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go
index e75fdaf40..310f12248 100644
--- a/pkg/innerring/processors/netmap/wrappers.go
+++ b/pkg/innerring/processors/netmap/wrappers.go
@@ -1,6 +1,8 @@
package netmap
import (
+ "context"
+
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -18,13 +20,13 @@ type netmapClientWrapper struct {
netmapClient *netmapclient.Client
}
-func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error {
- _, err := w.netmapClient.UpdatePeerState(p)
+func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error {
+ _, err := w.netmapClient.UpdatePeerState(ctx, p)
return err
}
-func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
- _, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...)
+func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+ _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...)
return err
}
@@ -32,28 +34,28 @@ func (w *netmapClientWrapper) ContractAddress() util.Uint160 {
return w.netmapClient.ContractAddress()
}
-func (w *netmapClientWrapper) EpochDuration() (uint64, error) {
- return w.netmapClient.EpochDuration()
+func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) {
+ return w.netmapClient.EpochDuration(ctx)
}
func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) {
return w.netmapClient.Morph().TxHeight(h)
}
-func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) {
- return w.netmapClient.NetMap()
+func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) {
+ return w.netmapClient.NetMap(ctx)
}
-func (w *netmapClientWrapper) NewEpoch(epoch uint64) error {
- return w.netmapClient.NewEpoch(epoch)
+func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
+ return w.netmapClient.NewEpoch(ctx, epoch)
}
func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
return w.netmapClient.Morph().IsValidScript(script, signers)
}
-func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error {
- return w.netmapClient.AddPeer(p)
+func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error {
+ return w.netmapClient.AddPeer(ctx, p)
}
func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go
index d3071faad..0ef771359 100644
--- a/pkg/innerring/state.go
+++ b/pkg/innerring/state.go
@@ -1,6 +1,7 @@
package innerring
import (
+ "context"
"fmt"
"sort"
@@ -47,21 +48,21 @@ func (s *Server) SetEpochDuration(val uint64) {
}
// IsActive is a getter for a global active flag state.
-func (s *Server) IsActive() bool {
- return s.InnerRingIndex() >= 0
+func (s *Server) IsActive(ctx context.Context) bool {
+ return s.InnerRingIndex(ctx) >= 0
}
// IsAlphabet is a getter for a global alphabet flag state.
-func (s *Server) IsAlphabet() bool {
- return s.AlphabetIndex() >= 0
+func (s *Server) IsAlphabet(ctx context.Context) bool {
+ return s.AlphabetIndex(ctx) >= 0
}
// InnerRingIndex is a getter for a global index of node in inner ring list. Negative
// index means that node is not in the inner ring list.
-func (s *Server) InnerRingIndex() int {
- index, err := s.statusIndex.InnerRingIndex()
+func (s *Server) InnerRingIndex(ctx context.Context) int {
+ index, err := s.statusIndex.InnerRingIndex(ctx)
if err != nil {
- s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
+ s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
return -1
}
@@ -70,10 +71,10 @@ func (s *Server) InnerRingIndex() int {
// InnerRingSize is a getter for the global size of the inner ring list. This value
// is paired with the inner ring index.
-func (s *Server) InnerRingSize() int {
- size, err := s.statusIndex.InnerRingSize()
+func (s *Server) InnerRingSize(ctx context.Context) int {
+ size, err := s.statusIndex.InnerRingSize(ctx)
if err != nil {
- s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
+ s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
return 0
}
@@ -82,28 +83,28 @@ func (s *Server) InnerRingSize() int {
// AlphabetIndex is a getter for a global index of node in alphabet list.
// Negative index means that node is not in the alphabet list.
-func (s *Server) AlphabetIndex() int {
- index, err := s.statusIndex.AlphabetIndex()
+func (s *Server) AlphabetIndex(ctx context.Context) int {
+ index, err := s.statusIndex.AlphabetIndex(ctx)
if err != nil {
- s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
+ s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
return -1
}
return int(index)
}
-func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error {
+func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
validators := prm.Validators
- index := s.InnerRingIndex()
+ index := s.InnerRingIndex(ctx)
if s.contracts.alphabet.indexOutOfRange(index) {
- s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
+ s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
return nil
}
if len(validators) == 0 {
- s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
+ s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
return nil
}
@@ -126,12 +127,12 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
}
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
- _, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
+ _, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
- s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
+ s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
})
@@ -140,9 +141,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
// VoteForSidechainValidator calls vote method on alphabet contracts with
// the provided list of keys.
-func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error {
+func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
sort.Sort(prm.Validators)
- return s.voteForSidechainValidator(prm)
+ return s.voteForSidechainValidator(ctx, prm)
}
// ResetEpochTimer resets the block timer that produces events to update epoch
@@ -153,17 +154,17 @@ func (s *Server) ResetEpochTimer(h uint32) error {
return s.epochTimer.Reset()
}
-func (s *Server) setHealthStatus(hs control.HealthStatus) {
+func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) {
s.healthStatus.Store(int32(hs))
- s.notifySystemd(hs)
+ s.notifySystemd(ctx, hs)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(hs))
}
}
-func (s *Server) CompareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
+func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
- s.notifySystemd(newSt)
+ s.notifySystemd(ctx, newSt)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(newSt))
}
@@ -186,7 +187,7 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err
return persistStorage, nil
}
-func (s *Server) notifySystemd(st control.HealthStatus) {
+func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
if !s.sdNotify {
return
}
@@ -202,6 +203,6 @@ func (s *Server) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
- s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go
index fe09f8f2d..f60ca87c4 100644
--- a/pkg/innerring/state_test.go
+++ b/pkg/innerring/state_test.go
@@ -1,6 +1,7 @@
package innerring
import (
+ "context"
"testing"
"time"
@@ -42,12 +43,12 @@ func TestServerState(t *testing.T) {
require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration")
var healthStatus control.HealthStatus = control.HealthStatus_READY
- srv.setHealthStatus(healthStatus)
+ srv.setHealthStatus(context.Background(), healthStatus)
require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")
- require.True(t, srv.IsActive(), "invalid IsActive result")
- require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result")
- require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index")
- require.Equal(t, 1, srv.InnerRingSize(), "invalid IR index")
- require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index")
+ require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
+ require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
+ require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
+ require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR size")
+ require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go
index c787f9d5e..a6c40f9fa 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go
@@ -58,7 +58,7 @@ func defaultCfg(c *cfg) {
},
fullSizeLimit: 1 << 30, // 1GB
objSizeLimit: 1 << 20, // 1MB
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
metrics: &NoopMetrics{},
}
}
@@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
// WithLogger returns an option to specify Blobovnicza's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
+ c.log = l
}
}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
index caee770e8..95fdd844b 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
@@ -69,10 +69,10 @@ func TestBlobovnicza(t *testing.T) {
defer os.Remove(p)
// open Blobovnicza
- require.NoError(t, blz.Open())
+ require.NoError(t, blz.Open(context.Background()))
// initialize Blobovnicza
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Init(context.Background()))
// try to read non-existent address
testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
@@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) {
return err == nil
}, nil)
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
index aeaa4e1d5..4947512cc 100644
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ b/pkg/local_object_storage/blobovnicza/control.go
@@ -1,6 +1,7 @@
package blobovnicza
import (
+ "context"
"errors"
"fmt"
"path/filepath"
@@ -15,7 +16,7 @@ import (
//
// If the database file does not exist, it will be created automatically.
// If blobovnicza is already open, does nothing.
-func (b *Blobovnicza) Open() error {
+func (b *Blobovnicza) Open(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -23,7 +24,7 @@ func (b *Blobovnicza) Open() error {
return nil
}
- b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
+ b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@@ -37,7 +38,7 @@ func (b *Blobovnicza) Open() error {
}
}
- b.log.Debug(logs.BlobovniczaOpeningBoltDB,
+ b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
@@ -55,7 +56,7 @@ func (b *Blobovnicza) Open() error {
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error will return.
-func (b *Blobovnicza) Init() error {
+func (b *Blobovnicza) Init(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -63,7 +64,7 @@ func (b *Blobovnicza) Init() error {
return errors.New("blobovnicza is not open")
}
- b.log.Debug(logs.BlobovniczaInitializing,
+ b.log.Debug(ctx, logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
@@ -71,7 +72,7 @@ func (b *Blobovnicza) Init() error {
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
- b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
+ b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}
@@ -81,7 +82,7 @@ func (b *Blobovnicza) Init() error {
// create size range bucket
rangeStr := stringifyBounds(lower, upper)
- b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
+ b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))
_, err := tx.CreateBucketIfNotExists(key)
@@ -98,14 +99,14 @@ func (b *Blobovnicza) Init() error {
}
}
- return b.initializeCounters()
+ return b.initializeCounters(ctx)
}
func (b *Blobovnicza) ObjectsCount() uint64 {
return b.itemsCount.Load()
}
-func (b *Blobovnicza) initializeCounters() error {
+func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
var size uint64
var items uint64
var sizeExists bool
@@ -128,20 +129,20 @@ func (b *Blobovnicza) initializeCounters() error {
})
})
if err != nil {
- return fmt.Errorf("can't determine DB size: %w", err)
+ return fmt.Errorf("determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
- b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
+ b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
if err := saveDataSize(tx, size); err != nil {
return err
}
return saveItemsCount(tx, items)
}); err != nil {
- b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
- return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
+ b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
+ return fmt.Errorf("save blobovnicza's size and items count: %w", err)
}
- b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
+ b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}
b.dataSize.Store(size)
@@ -154,7 +155,7 @@ func (b *Blobovnicza) initializeCounters() error {
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
-func (b *Blobovnicza) Close() error {
+func (b *Blobovnicza) Close(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -162,7 +163,7 @@ func (b *Blobovnicza) Close() error {
return nil
}
- b.log.Debug(logs.BlobovniczaClosingBoltDB,
+ b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go
index 5d6787897..8f24b5675 100644
--- a/pkg/local_object_storage/blobovnicza/delete.go
+++ b/pkg/local_object_storage/blobovnicza/delete.go
@@ -6,7 +6,6 @@ import (
"syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -91,10 +90,9 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
}
if err == nil && found {
- b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
+ b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
b.itemDeleted(recordSize)
}
diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go
index c464abc87..5a382c159 100644
--- a/pkg/local_object_storage/blobovnicza/get_test.go
+++ b/pkg/local_object_storage/blobovnicza/get_test.go
@@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
var blz *Blobovnicza
- defer func() { require.NoError(t, blz.Close()) }()
+ defer func() { require.NoError(t, blz.Close(context.Background())) }()
fnInit := func(szLimit uint64) {
if blz != nil {
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
}
blz = New(
@@ -26,8 +26,8 @@ func TestBlobovnicza_Get(t *testing.T) {
WithObjectSizeLimit(szLimit),
)
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
}
// initial distribution: [0:32K] (32K:64K]
diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go
index 01e5529da..cd33b263c 100644
--- a/pkg/local_object_storage/blobovnicza/iterate.go
+++ b/pkg/local_object_storage/blobovnicza/iterate.go
@@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
if prm.ignoreErrors {
return nil
}
- return fmt.Errorf("could not decode address key: %w", err)
+ return fmt.Errorf("decode address key: %w", err)
}
}
diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go
index 5db1e4165..717274781 100644
--- a/pkg/local_object_storage/blobovnicza/iterate_test.go
+++ b/pkg/local_object_storage/blobovnicza/iterate_test.go
@@ -15,8 +15,8 @@ import (
func TestBlobovniczaIterate(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
b := New(WithPath(filename))
- require.NoError(t, b.Open())
- require.NoError(t, b.Init())
+ require.NoError(t, b.Open(context.Background()))
+ require.NoError(t, b.Init(context.Background()))
data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}}
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
index 603c6abe3..dbaa7387a 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
@@ -1,6 +1,7 @@
package blobovniczatree
import (
+ "context"
"path/filepath"
"sync"
@@ -17,8 +18,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}
-func (db *activeDB) Close() {
- db.shDB.Close()
+func (db *activeDB) Close(ctx context.Context) {
+ db.shDB.Close(ctx)
}
func (db *activeDB) SystemPath() string {
@@ -53,8 +54,8 @@ func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager
// GetOpenedActiveDBForLevel returns the active DB for the given level.
// DB must be closed after use.
-func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) {
- activeDB, err := m.getCurrentActiveIfOk(lvlPath)
+func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) {
+ activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath)
if err != nil {
return nil, err
}
@@ -62,7 +63,7 @@ func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB,
return activeDB, nil
}
- return m.updateAndGetActive(lvlPath)
+ return m.updateAndGetActive(ctx, lvlPath)
}
func (m *activeDBManager) Open() {
@@ -72,18 +73,18 @@ func (m *activeDBManager) Open() {
m.closed = false
}
-func (m *activeDBManager) Close() {
+func (m *activeDBManager) Close(ctx context.Context) {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
for _, db := range m.levelToActiveDB {
- db.Close()
+ db.Close(ctx)
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
}
-func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) {
+func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
@@ -96,13 +97,13 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error
return nil, nil
}
- blz, err := db.Open() // open db for usage, will be closed on activeDB.Close()
+ blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close()
if err != nil {
return nil, err
}
if blz.IsFull() {
- db.Close()
+ db.Close(ctx)
return nil, nil
}
@@ -112,11 +113,11 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error
}, nil
}
-func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) {
+func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) {
m.levelLock.Lock(lvlPath)
defer m.levelLock.Unlock(lvlPath)
- current, err := m.getCurrentActiveIfOk(lvlPath)
+ current, err := m.getCurrentActiveIfOk(ctx, lvlPath)
if err != nil {
return nil, err
}
@@ -124,7 +125,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
return current, nil
}
- nextShDB, err := m.getNextSharedDB(lvlPath)
+ nextShDB, err := m.getNextSharedDB(ctx, lvlPath)
if err != nil {
return nil, err
}
@@ -133,7 +134,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
return nil, nil
}
- blz, err := nextShDB.Open() // open db for client, client must call Close() after usage
+ blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage
if err != nil {
return nil, err
}
@@ -143,7 +144,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
}, nil
}
-func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
+func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) {
var nextActiveDBIdx uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
@@ -160,17 +161,17 @@ func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
next := m.dbManager.GetByPath(path)
- _, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
+ _, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
if err != nil {
return nil, err
}
previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
- next.Close() // manager is closed, so don't hold active DB open
+ next.Close(ctx) // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
- previous.Close()
+ previous.Close(ctx)
}
return next, nil
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
index c909113c7..3e8b9f07b 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
@@ -158,16 +158,16 @@ func (b *Blobovniczas) Path() string {
}
// SetCompressor implements common.Storage.
-func (b *Blobovniczas) SetCompressor(cc *compression.Config) {
+func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) {
b.compression = cc
}
-func (b *Blobovniczas) Compressor() *compression.Config {
+func (b *Blobovniczas) Compressor() *compression.Compressor {
return b.compression
}
// SetReportErrorFunc implements common.Storage.
-func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) {
+func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) {
b.reportError = f
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
index 5c103c1bb..04ff5120c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
@@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int,
ch := cache.NewCache[string, *sharedDB]().
WithTTL(ttl).WithLRU().WithMaxKeys(size).
WithOnEvicted(func(_ string, db *sharedDB) {
- db.Close()
+ db.Close(parentCtx)
})
ctx, cancel := context.WithCancel(parentCtx)
res := &dbCache{
@@ -81,12 +81,12 @@ func (c *dbCache) Close() {
c.closed = true
}
-func (c *dbCache) GetOrCreate(path string) *sharedDB {
+func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB {
value := c.getExisted(path)
if value != nil {
return value
}
- return c.create(path)
+ return c.create(ctx, path)
}
func (c *dbCache) EvictAndMarkNonCached(path string) {
@@ -122,7 +122,7 @@ func (c *dbCache) getExisted(path string) *sharedDB {
return nil
}
-func (c *dbCache) create(path string) *sharedDB {
+func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
c.pathLock.Lock(path)
defer c.pathLock.Unlock(path)
@@ -133,12 +133,12 @@ func (c *dbCache) create(path string) *sharedDB {
value = c.dbManager.GetByPath(path)
- _, err := value.Open() // open db to hold reference, closed by evictedDB.Close() or if cache closed
+ _, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed
if err != nil {
return value
}
if added := c.put(path, value); !added {
- value.Close()
+ value.Close(ctx)
}
return value
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
index cc8a52d03..f87f4a144 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
@@ -19,7 +19,8 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
st := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(10),
WithBlobovniczaShallowDepth(1),
@@ -27,7 +28,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
require.NoError(t, st.Open(mode.ComponentReadWrite))
require.NoError(t, st.Init())
defer func() {
- require.NoError(t, st.Close())
+ require.NoError(t, st.Close(context.Background()))
}()
objGen := &testutil.SeqObjGenerator{ObjSize: 1}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index 681cf876c..a6c1ce368 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -24,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
- b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
+ b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)
if b.readOnly {
- b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
+ b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}
@@ -41,35 +41,34 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
}
eg, egCtx := errgroup.WithContext(ctx)
- eg.SetLimit(b.blzInitWorkerCount)
- err = b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
- eg.Go(func() error {
- p = strings.TrimSuffix(p, rebuildSuffix)
- shBlz := b.getBlobovniczaWithoutCaching(p)
- blz, err := shBlz.Open()
- if err != nil {
- return err
- }
- defer shBlz.Close()
-
- moveInfo, err := blz.ListMoveInfo(egCtx)
- if err != nil {
- return err
- }
- for _, move := range moveInfo {
- b.deleteProtectedObjects.Add(move.Address)
- }
-
- b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
- return nil
- })
- return false, nil
- })
- if err != nil {
- _ = eg.Wait()
- return err
+ if b.blzInitWorkerCount > 0 {
+ eg.SetLimit(b.blzInitWorkerCount + 1)
}
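+ // the iterating goroutine below occupies one worker slot itself, hence the +1 in the limit above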
+ eg.Go(func() error {
+ return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
+ eg.Go(func() error {
+ p = strings.TrimSuffix(p, rebuildSuffix)
+ shBlz := b.getBlobovniczaWithoutCaching(p)
+ blz, err := shBlz.Open(egCtx)
+ if err != nil {
+ return err
+ }
+ defer shBlz.Close(egCtx)
+ moveInfo, err := blz.ListMoveInfo(egCtx)
+ if err != nil {
+ return err
+ }
+ for _, move := range moveInfo {
+ b.deleteProtectedObjects.Add(move.Address)
+ }
+
+ b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
+ return nil
+ })
+ return false, nil
+ })
+ })
return eg.Wait()
}
@@ -80,9 +79,9 @@ func (b *Blobovniczas) openManagers() {
}
// Close implements common.Storage.
-func (b *Blobovniczas) Close() error {
+func (b *Blobovniczas) Close(ctx context.Context) error {
b.dbCache.Close() // order important
- b.activeDBManager.Close()
+ b.activeDBManager.Close(ctx)
b.commondbManager.Close()
return nil
@@ -91,8 +90,8 @@ func (b *Blobovniczas) Close() error {
// returns blobovnicza with path p
//
// If blobovnicza is already cached, instance from cache is returned w/o changes.
-func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB {
- return b.dbCache.GetOrCreate(p)
+func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB {
+ return b.dbCache.GetOrCreate(ctx, p)
}
func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
index f0a32ded1..7db1891f9 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
@@ -2,6 +2,9 @@ package blobovniczatree
import (
"context"
+ "os"
+ "path"
+ "strconv"
"testing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -51,7 +54,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj35, gRes.Object)
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
// change depth and width
blz = NewBlobovniczaTree(
@@ -89,7 +92,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
})
require.NoError(t, err)
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
// change depth and width back
blz = NewBlobovniczaTree(
@@ -127,5 +130,36 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj52, gRes.Object)
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
+}
+
+func TestInitBlobovniczasInitErrorType(t *testing.T) {
+ t.Parallel()
+
+ rootDir := t.TempDir()
+
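+ // create several corrupted databases, each with a leftover rebuild temp file, so Init has to open and fail on every one of them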
+ for idx := 0; idx < 10; idx++ {
+ f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"))
+ require.NoError(t, err)
+ _, err = f.Write([]byte("invalid db"))
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+
+ f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix))
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+ }
+
+ blz := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaShallowDepth(1),
+ WithBlobovniczaShallowWidth(1),
+ WithRootPath(rootDir),
+ )
+
+ require.NoError(t, blz.Open(mode.ComponentReadWrite))
+ err := blz.Init()
+ require.Contains(t, err.Error(), "open blobovnicza")
+ require.Contains(t, err.Error(), "invalid database")
+ require.NoError(t, blz.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go
index cf91637d7..b83849c77 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/count.go
@@ -16,17 +16,17 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) {
b.metrics.ObjectsCount(time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
defer span.End()
var result uint64
err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
shDB := b.getBlobovniczaWithoutCaching(p)
- blz, err := shDB.Open()
+ blz, err := shDB.Open(ctx)
if err != nil {
return true, err
}
- defer shDB.Close()
+ defer shDB.Close(ctx)
result += blz.ObjectsCount()
return false, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
index 298de3ad6..d096791c3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -19,7 +18,10 @@ import (
"go.uber.org/zap"
)
-var errObjectIsDeleteProtected = errors.New("object is delete protected")
+var (
+ errObjectIsDeleteProtected = errors.New("object is delete protected")
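+ // deleteRes is a shared zero-value result returned on every Delete error path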
+ deleteRes = common.DeleteRes{}
+)
// Delete deletes object from blobovnicza tree.
//
@@ -43,17 +45,17 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
defer span.End()
if b.readOnly {
- return common.DeleteRes{}, common.ErrReadOnly
+ return deleteRes, common.ErrReadOnly
}
if b.rebuildGuard.TryRLock() {
defer b.rebuildGuard.RUnlock()
} else {
- return common.DeleteRes{}, errRebuildInProgress
+ return deleteRes, errRebuildInProgress
}
if b.deleteProtectedObjects.Contains(prm.Address) {
- return common.DeleteRes{}, errObjectIsDeleteProtected
+ return deleteRes, errObjectIsDeleteProtected
}
var bPrm blobovnicza.DeletePrm
@@ -61,12 +63,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(id.Path())
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true
@@ -80,10 +82,9 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+ zap.Error(err),
)
}
}
@@ -98,7 +99,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if err == nil && !objectFound {
// not found in any blobovnicza
- return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
success = err == nil
@@ -109,12 +110,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
//
// returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
- shBlz := b.getBlobovnicza(blzPath)
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, blzPath)
+ blz, err := shBlz.Open(ctx)
if err != nil {
- return common.DeleteRes{}, err
+ return deleteRes, err
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
return b.deleteObject(ctx, blz, prm)
}
@@ -122,5 +123,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
// removes object from blobovnicza and returns common.DeleteRes.
func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(ctx, prm)
- return common.DeleteRes{}, err
+ return deleteRes, err
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index a64b2bbb1..0c5e48821 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"go.opentelemetry.io/otel/attribute"
@@ -37,12 +36,12 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(id.Path())
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.ExistsRes{}, err
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err
@@ -55,10 +54,9 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
_, err := b.getObjectFromLevel(ctx, gPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index d6ffd8bce..df2b4ffe5 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -19,7 +19,8 @@ func TestExistsInvalidStorageID(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -27,7 +28,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
WithBlobovniczaSize(1<<20))
require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
- defer func() { require.NoError(t, b.Close()) }()
+ defer func() { require.NoError(t, b.Close(context.Background())) }()
obj := blobstortest.NewObject(1024)
addr := object.AddressOf(obj)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
index d390ecf1d..9244d765c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
@@ -15,7 +15,8 @@ func TestGeneric(t *testing.T) {
helper := func(t *testing.T, dir string) common.Storage {
return NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -43,7 +44,8 @@ func TestControl(t *testing.T) {
newTree := func(t *testing.T) common.Storage {
return NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index 08cacda8a..e5c83e5f2 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -48,12 +47,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(id.Path())
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
res, err = b.getObject(ctx, blz, bPrm)
if err == nil {
@@ -67,10 +66,9 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
res, err = b.getObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+ zap.Error(err),
)
}
}
@@ -95,12 +93,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
// open blobovnicza (cached inside)
- shBlz := b.getBlobovnicza(blzPath)
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, blzPath)
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRes{}, err
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
return b.getObject(ctx, blz, prm)
}
@@ -115,13 +113,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index d237ae439..27d13f4f3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -47,12 +46,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(id.Path())
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
res, err := b.getObjectRange(ctx, blz, prm)
if err == nil {
@@ -69,10 +68,9 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) {
- b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
if outOfBounds {
return true, err
@@ -103,12 +101,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
// open blobovnicza (cached inside)
- shBlz := b.getBlobovnicza(blzPath)
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, blzPath)
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
return b.getObjectRange(ctx, blz, prm)
}
@@ -130,13 +128,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
from := prm.Range.GetOffset()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index f6acb46aa..ceb8fb7e3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -42,14 +42,14 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
data, err := b.compression.Decompress(elem.ObjectData())
if err != nil {
if prm.IgnoreErrors {
- b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", elem.Address()),
- zap.String("err", err.Error()),
+ zap.Error(err),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return nil
}
- return fmt.Errorf("could not decompress object data: %w", err)
+ return fmt.Errorf("decompress object data: %w", err)
}
if prm.Handler != nil {
@@ -72,19 +72,19 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
// iterator over all Blobovniczas in unsorted order. Break on f's error return.
func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
- shBlz := b.getBlobovnicza(p)
- blz, err := shBlz.Open()
+ shBlz := b.getBlobovnicza(ctx, p)
+ blz, err := shBlz.Open(ctx)
if err != nil {
if ignoreErrors {
- b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
- zap.String("err", err.Error()),
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.Error(err),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return false, nil
}
- return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
+ return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
}
- defer shBlz.Close()
+ defer shBlz.Close(ctx)
err = f(p, blz)
@@ -249,6 +249,12 @@ func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Addres
}
func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) {
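+ // honor context cancellation before reading the next directory level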
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ default:
+ }
+
sysPath := filepath.Join(b.rootPath, path)
entries, err := os.ReadDir(sysPath)
if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
index 4fdde15a9..6438f715b 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
@@ -1,6 +1,7 @@
package blobovniczatree
import (
+ "context"
"errors"
"fmt"
"os"
@@ -48,7 +49,7 @@ func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
}
}
-func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
+func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
if b.closedFlag.Load() {
return nil, errClosed
}
@@ -67,11 +68,11 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
blobovnicza.WithMetrics(b.metrics),
)...)
- if err := blz.Open(); err != nil {
- return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
+ if err := blz.Open(ctx); err != nil {
+ return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
}
- if err := blz.Init(); err != nil {
- return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
+ if err := blz.Init(ctx); err != nil {
+ return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
}
b.refCount++
@@ -81,22 +82,22 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
return blz, nil
}
-func (b *sharedDB) Close() {
+func (b *sharedDB) Close(ctx context.Context) {
b.cond.L.Lock()
defer b.cond.L.Unlock()
if b.refCount == 0 {
- b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
+ b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.cond.Broadcast()
return
}
if b.refCount == 1 {
b.refCount = 0
- if err := b.blcza.Close(); err != nil {
- b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+ if err := b.blcza.Close(ctx); err != nil {
+ b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
b.blcza = nil
@@ -110,7 +111,7 @@ func (b *sharedDB) Close() {
}
}
-func (b *sharedDB) CloseAndRemoveFile() error {
+func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
b.cond.L.Lock()
if b.refCount > 1 {
b.cond.Wait()
@@ -121,12 +122,12 @@ func (b *sharedDB) CloseAndRemoveFile() error {
return errClosingClosedBlobovnicza
}
- if err := b.blcza.Close(); err != nil {
- b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+ if err := b.blcza.Close(ctx); err != nil {
+ b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
- return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
+ return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
}
b.refCount = 0
@@ -140,8 +141,8 @@ func (b *sharedDB) SystemPath() string {
return b.path
}
-// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
-type levelDbManager struct {
+// levelDBManager stores pointers to the sharedDB instances for a leaf directory of the blobovnicza tree.
+type levelDBManager struct {
dbMtx *sync.RWMutex
databases map[uint64]*sharedDB
@@ -156,8 +157,8 @@ type levelDbManager struct {
func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string,
readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
-) *levelDbManager {
- result := &levelDbManager{
+) *levelDBManager {
+ result := &levelDBManager{
databases: make(map[uint64]*sharedDB),
dbMtx: &sync.RWMutex{},
@@ -172,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st
return result
}
-func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
+func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
res := m.getDBIfExists(idx)
if res != nil {
return res
@@ -180,14 +181,14 @@ func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
return m.getOrCreateDB(idx)
}
-func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB {
+func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB {
m.dbMtx.RLock()
defer m.dbMtx.RUnlock()
return m.databases[idx]
}
-func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
+func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
m.dbMtx.Lock()
defer m.dbMtx.Unlock()
@@ -201,7 +202,7 @@ func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
return db
}
-func (m *levelDbManager) hasAnyDB() bool {
+func (m *levelDBManager) hasAnyDB() bool {
m.dbMtx.RLock()
defer m.dbMtx.RUnlock()
@@ -212,7 +213,7 @@ func (m *levelDbManager) hasAnyDB() bool {
//
// The blobovnicza opens at the first request, closes after the last request.
type dbManager struct {
- levelToManager map[string]*levelDbManager
+ levelToManager map[string]*levelDBManager
levelToManagerGuard *sync.RWMutex
closedFlag *atomic.Bool
dbCounter *openDBCounter
@@ -230,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool,
options: options,
readOnly: readOnly,
metrics: metrics,
- levelToManager: make(map[string]*levelDbManager),
+ levelToManager: make(map[string]*levelDBManager),
levelToManagerGuard: &sync.RWMutex{},
log: log,
closedFlag: &atomic.Bool{},
@@ -265,7 +266,7 @@ func (m *dbManager) Close() {
m.dbCounter.WaitUntilAllClosed()
}
-func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
+func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
result := m.getLevelManagerIfExists(lvlPath)
if result != nil {
return result
@@ -273,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
return m.getOrCreateLevelManager(lvlPath)
}
-func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
+func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager {
m.levelToManagerGuard.RLock()
defer m.levelToManagerGuard.RUnlock()
return m.levelToManager[lvlPath]
}
-func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
+func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
m.levelToManagerGuard.Lock()
defer m.levelToManagerGuard.Unlock()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
index 008be9543..5f268b0f2 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
@@ -1,6 +1,7 @@
package blobovniczatree
import (
+ "context"
"io/fs"
"time"
@@ -18,9 +19,9 @@ type cfg struct {
openedCacheSize int
blzShallowDepth uint64
blzShallowWidth uint64
- compression *compression.Config
+ compression *compression.Compressor
blzOpts []blobovnicza.Option
- reportError func(string, error) // reportError is the function called when encountering disk errors.
+ reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors.
metrics Metrics
waitBeforeDropDB time.Duration
blzInitWorkerCount int
@@ -47,14 +48,14 @@ const (
func initConfig(c *cfg) {
*c = cfg{
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
perm: defaultPerm,
openedCacheSize: defaultOpenedCacheSize,
openedCacheTTL: defaultOpenedCacheTTL,
openedCacheExpInterval: defaultOpenedCacheInterval,
blzShallowDepth: defaultBlzShallowDepth,
blzShallowWidth: defaultBlzShallowWidth,
- reportError: func(string, error) {},
+ reportError: func(context.Context, string, error) {},
metrics: &noopMetrics{},
waitBeforeDropDB: defaultWaitBeforeDropDB,
blzInitWorkerCount: defaultBlzInitWorkerCount,
@@ -62,10 +63,15 @@ func initConfig(c *cfg) {
}
}
-func WithLogger(l *logger.Logger) Option {
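+// WithBlobovniczaTreeLogger sets the logger used by the blobovnicza tree itself.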
+func WithBlobovniczaTreeLogger(log *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
- c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l))
+ c.log = log
+ }
+}
+
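+// WithBlobovniczaLogger sets the logger that is passed down to the individual blobovnicza instances.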
+func WithBlobovniczaLogger(log *logger.Logger) Option {
+ return func(c *cfg) {
+ c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log))
}
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 76c4953e4..37c49d741 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -77,37 +76,34 @@ type putIterator struct {
}
func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
- active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
+ active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
if err != nil {
if !isLogical(err) {
- i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
- i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
+ zap.Error(err))
}
return false, nil
}
if active == nil {
- i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
return false, nil
}
- defer active.Close()
+ defer active.Close(ctx)
i.AllFull = false
_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
if err != nil {
if !isLogical(err) {
- i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
- i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
+ i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.SystemPath()),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
if errors.Is(err, blobovnicza.ErrNoSpace) {
i.AllFull = true
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 202d38cd7..a840275b8 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -49,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
var res common.RebuildRes
- b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild)
- completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
+ b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
+ completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter)
res.ObjectsMoved += completedPreviosMoves
if err != nil {
- b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
success = false
return res, err
}
- b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
+ b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
- b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
+ b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
if err != nil {
- b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
return res, err
}
- b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
+ b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
if err != nil {
success = false
@@ -78,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
var completedDBCount uint32
for _, db := range dbs {
- b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
- movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
+ b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
+ movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter)
res.ObjectsMoved += movedObjects
if err != nil {
- b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
return res, err
}
- b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
+ b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
res.FilesRemoved++
completedDBCount++
b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))
@@ -165,7 +165,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
continue
}
path := filepath.Join(lvlPath, e.Name())
- resettlementRequired, err := b.rebuildBySize(path, target)
+ resettlementRequired, err := b.rebuildBySize(ctx, path, target)
if err != nil {
return false, err
}
@@ -180,13 +180,13 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
return result, nil
}
-func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) {
- shDB := b.getBlobovnicza(path)
- blz, err := shDB.Open()
+func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) {
+ shDB := b.getBlobovnicza(ctx, path)
+ blz, err := shDB.Open(ctx)
if err != nil {
return false, err
}
- defer shDB.Close()
+ defer shDB.Close(ctx)
fp := blz.FillPercent()
// accepted fill percent defines as
// |----|+++++++++++++++++|+++++++++++++++++|---------------
@@ -195,9 +195,9 @@ func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool,
return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
}
-func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
- shDB := b.getBlobovnicza(path)
- blz, err := shDB.Open()
+func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) {
+ shDB := b.getBlobovnicza(ctx, path)
+ blz, err := shDB.Open(ctx)
if err != nil {
return 0, err
}
@@ -206,13 +206,13 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
if shDBClosed {
return
}
- shDB.Close()
+ shDB.Close(ctx)
}()
- dropTempFile, err := b.addRebuildTempFile(path)
+ dropTempFile, err := b.addRebuildTempFile(ctx, path)
if err != nil {
return 0, err
}
- migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter)
+ migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter)
if err != nil {
return migratedObjects, err
}
@@ -224,21 +224,21 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
return migratedObjects, err
}
-func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
+func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
sysPath := filepath.Join(b.rootPath, path)
- sysPath = sysPath + rebuildSuffix
+ sysPath += rebuildSuffix
_, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
if err != nil {
return nil, err
}
return func() {
if err := os.Remove(sysPath); err != nil {
- b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
}, nil
}
-func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
+func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) {
var result atomic.Uint64
batch := make(map[oid.Address][]byte)
@@ -253,7 +253,12 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
})
for {
- _, err := blz.Iterate(ctx, prm)
+ release, err := limiter.ReadRequest(ctx)
+ if err != nil {
+ return result.Load(), err
+ }
+ _, err = blz.Iterate(ctx, prm)
+ release()
if err != nil && !errors.Is(err, errBatchFull) {
return result.Load(), err
}
@@ -265,13 +270,19 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
eg, egCtx := errgroup.WithContext(ctx)
for addr, data := range batch {
- if err := limiter.AcquireWorkSlot(egCtx); err != nil {
+ release, err := limiter.AcquireWorkSlot(egCtx)
+ if err != nil {
_ = eg.Wait()
return result.Load(), err
}
eg.Go(func() error {
- defer limiter.ReleaseWorkSlot()
- err := b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+ defer release()
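+ // each object move is metered as one write request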
+ moveRelease, err := limiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ err = b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+ moveRelease()
if err == nil {
result.Add(1)
}
@@ -317,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo
return nil
}
-func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) {
+func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
@@ -330,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB)
b.dbFilesGuard.Lock()
defer b.dbFilesGuard.Unlock()
- if err := shDb.CloseAndRemoveFile(); err != nil {
+ if err := shDB.CloseAndRemoveFile(ctx); err != nil {
return false, err
}
b.commondbManager.CleanResources(path)
@@ -359,26 +370,37 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
return b.dropDirectoryIfEmpty(filepath.Dir(path))
}
-func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) {
+func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) {
var count uint64
var rebuildTempFilesToRemove []string
err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
rebuildTmpFilePath := s
s = strings.TrimSuffix(s, rebuildSuffix)
- shDB := b.getBlobovnicza(s)
- blz, err := shDB.Open()
+ shDB := b.getBlobovnicza(ctx, s)
+ blz, err := shDB.Open(ctx)
if err != nil {
return true, err
}
- defer shDB.Close()
+ defer shDB.Close(ctx)
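+ // listing move info is metered as a read request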
+ release, err := rateLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
incompletedMoves, err := blz.ListMoveInfo(ctx)
+ release()
if err != nil {
return true, err
}
for _, move := range incompletedMoves {
- if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil {
+ release, err := rateLimiter.WriteRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore)
+ release()
+ if err != nil {
return true, err
}
count++
@@ -388,9 +410,14 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
return false, nil
})
for _, tmp := range rebuildTempFilesToRemove {
- if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
- b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
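+ // temp-file removal is metered as a write request as well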
+ release, err := rateLimiter.WriteRequest(ctx)
+ if err != nil {
+ return count, err
}
+ if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ }
+ release()
}
return count, err
}
@@ -398,12 +425,12 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
move blobovnicza.MoveInfo, metaStore common.MetaStorage,
) error {
- targetDB := b.getBlobovnicza(NewIDFromBytes(move.TargetStorageID).Path())
- target, err := targetDB.Open()
+ targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path())
+ target, err := targetDB.Open(ctx)
if err != nil {
return err
}
- defer targetDB.Close()
+ defer targetDB.Close(ctx)
existsInSource := true
var gPrm blobovnicza.GetPrm
@@ -413,14 +440,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
if client.IsErrObjectNotFound(err) {
existsInSource = false
} else {
- b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}
}
if !existsInSource { // object was deleted by Rebuild, need to delete move info
if err = source.DropMoveInfo(ctx, move.Address); err != nil {
- b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}
b.deleteProtectedObjects.Delete(move.Address)
@@ -429,7 +456,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
existsInTarget, err := target.Exists(ctx, move.Address)
if err != nil {
- b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}
@@ -439,25 +466,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
putPrm.SetMarshaledObject(gRes.Object())
_, err = target.Put(ctx, putPrm)
if err != nil {
- b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
return err
}
}
if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
- b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
return err
}
var deletePrm blobovnicza.DeletePrm
deletePrm.SetAddress(move.Address)
if _, err = source.Delete(ctx, deletePrm); err != nil {
- b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
return err
}
if err = source.DropMoveInfo(ctx, move.Address); err != nil {
- b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}
@@ -477,21 +504,21 @@ type moveIterator struct {
}
func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) {
- target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
+ target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
if err != nil {
if !isLogical(err) {
- i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
- i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
}
return false, nil
}
if target == nil {
- i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+ i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
return false, nil
}
- defer target.Close()
+ defer target.Close(ctx)
i.AllFull = false
@@ -503,9 +530,9 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
TargetStorageID: targetStorageID.Bytes(),
}); err != nil {
if !isLogical(err) {
- i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
} else {
- i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
@@ -519,15 +546,15 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
_, err = target.Blobovnicza().Put(ctx, putPrm)
if err != nil {
if !isLogical(err) {
- i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
- i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
}
return true, nil
}
if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
- i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
return true, nil
}
@@ -535,18 +562,18 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
deletePrm.SetAddress(i.Address)
if _, err = i.Source.Delete(ctx, deletePrm); err != nil {
if !isLogical(err) {
- i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err)
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err)
} else {
- i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil {
if !isLogical(err) {
- i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err)
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err)
} else {
- i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index b177d20fc..4146ef260 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -35,8 +35,8 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@@ -53,7 +53,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
@@ -65,8 +65,8 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@@ -83,19 +83,19 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
testRebuildFailoverValidate(t, dir, obj, true)
}
@@ -105,8 +105,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@@ -117,14 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
var pPrm blobovnicza.PutPrm
pPrm.SetAddress(object.AddressOf(obj))
@@ -132,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
testRebuildFailoverValidate(t, dir, obj, false)
}
@@ -140,7 +140,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) {
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(2048),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -161,20 +162,22 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
storageIDs: make(map[oid.Address][]byte),
guard: &sync.Mutex{},
}
+ limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- WorkerLimiter: &rebuildLimiterStub{},
- FillPercent: 1,
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 1,
})
require.NoError(t, err)
require.Equal(t, uint64(1), rRes.ObjectsMoved)
require.Equal(t, uint64(0), rRes.FilesRemoved)
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
moveInfo, err := blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@@ -185,11 +188,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
_, err = blz.Get(context.Background(), gPrm)
require.True(t, client.IsErrObjectNotFound(err))
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
moveInfo, err = blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@@ -203,7 +206,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)]))
}
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
_, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
require.True(t, os.IsNotExist(err))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index dfd928aaf..a7a99fec3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -2,7 +2,9 @@ package blobovniczatree
import (
"context"
+ "fmt"
"sync"
+ "sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -48,7 +50,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -61,7 +64,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Init())
storageIDs := make(map[oid.Address][]byte)
- for i := 0; i < 100; i++ {
+ for range 100 {
obj := blobstortest.NewObject(64 * 1024) // 64KB object
data, err := obj.Marshal()
require.NoError(t, err)
@@ -76,10 +79,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
+ limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- WorkerLimiter: &rebuildLimiterStub{},
- FillPercent: 60,
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 60,
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -93,7 +97,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
})
t.Run("no rebuild single db", func(t *testing.T) {
@@ -102,7 +107,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -128,10 +134,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
+ limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- WorkerLimiter: &rebuildLimiterStub{},
- FillPercent: 90, // 64KB / 100KB = 64%
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 90, // 64KB / 100KB = 64%
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -145,7 +152,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
})
t.Run("rebuild by fill percent", func(t *testing.T) {
@@ -154,7 +162,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -168,7 +177,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs := make(map[oid.Address][]byte)
toDelete := make(map[oid.Address][]byte)
- for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
obj := blobstortest.NewObject(64 * 1024)
data, err := obj.Marshal()
require.NoError(t, err)
@@ -193,10 +202,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
+ limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- WorkerLimiter: &rebuildLimiterStub{},
- FillPercent: 80,
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -214,7 +224,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
})
t.Run("rebuild by overflow", func(t *testing.T) {
@@ -223,7 +234,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -236,7 +248,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Init())
storageIDs := make(map[oid.Address][]byte)
- for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
obj := blobstortest.NewObject(64 * 1024)
data, err := obj.Marshal()
require.NoError(t, err)
@@ -251,10 +263,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1),
WithBlobovniczaShallowDepth(1),
@@ -266,10 +279,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
+ limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- WorkerLimiter: &rebuildLimiterStub{},
- FillPercent: 80,
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -284,7 +298,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
})
}
@@ -294,7 +309,8 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024), // 64KB object size limit
WithBlobovniczaShallowWidth(5),
WithBlobovniczaShallowDepth(2), // depth = 2
@@ -318,11 +334,12 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
storageIDs := make(map[oid.Address][]byte)
storageIDs[prm.Address] = res.StorageID
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(32*1024), // 32KB object size limit
WithBlobovniczaShallowWidth(5),
WithBlobovniczaShallowDepth(3), // depth = 3
@@ -338,9 +355,10 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
+ limiter := &rebuildLimiterStub{}
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
- rPrm.WorkerLimiter = &rebuildLimiterStub{}
+ rPrm.Limiter = limiter
rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
@@ -355,14 +373,16 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
}
func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(2048),
WithBlobovniczaShallowWidth(sourceWidth),
WithBlobovniczaShallowDepth(sourceDepth),
@@ -399,11 +419,12 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
}
require.NoError(t, eg.Wait())
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
- WithLogger(test.NewLogger(t)),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(2048),
WithBlobovniczaShallowWidth(targetWidth),
WithBlobovniczaShallowDepth(targetDepth),
@@ -427,9 +448,10 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
+ limiter := &rebuildLimiterStub{}
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
- rPrm.WorkerLimiter = &rebuildLimiterStub{}
+ rPrm.Limiter = limiter
rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
@@ -444,7 +466,8 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
require.NoError(t, err)
}
- require.NoError(t, b.Close())
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
}
type storageIDUpdateStub struct {
@@ -462,7 +485,36 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr
return nil
}
-type rebuildLimiterStub struct{}
+type rebuildLimiterStub struct {
+ slots atomic.Int64
+ readRequests atomic.Int64
+ writeRequests atomic.Int64
+}
-func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) error { return nil }
-func (s *rebuildLimiterStub) ReleaseWorkSlot() {}
+func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) {
+ s.slots.Add(1)
+ return func() { s.slots.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) {
+ s.readRequests.Add(1)
+ return func() { s.readRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) {
+ s.writeRequests.Add(1)
+ return func() { s.writeRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ValidateReleased() error {
+ if v := s.slots.Load(); v != 0 {
+ return fmt.Errorf("invalid slots value %d", v)
+ }
+ if v := s.readRequests.Load(); v != 0 {
+ return fmt.Errorf("invalid read requests value %d", v)
+ }
+ if v := s.writeRequests.Load(); v != 0 {
+ return fmt.Errorf("invalid write requests value %d", v)
+ }
+ return nil
+}
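A minimal usage sketch (editor's illustration, not part of this patch) of the acquire/defer-release pattern the reworked limiter contract enables; moveOne and its elided bodies are hypothetical, but the calls match common.RebuildLimiter as defined in this change:

func moveOne(ctx context.Context, l common.RebuildLimiter) error {
	release, err := l.AcquireWorkSlot(ctx) // reserve a rebuild worker slot
	if err != nil {
		return err
	}
	defer release() // runs on every return path, so ValidateReleased-style checks pass

	readDone, err := l.ReadRequest(ctx) // rate-limit the source read
	if err != nil {
		return err
	}
	// ... read the object from the source blobovnicza ...
	readDone()

	writeDone, err := l.WriteRequest(ctx) // rate-limit the target write
	if err != nil {
		return err
	}
	// ... put the object into the target blobovnicza ...
	writeDone()
	return nil
}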
diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go
index 6f579a8ca..ceaf2538a 100644
--- a/pkg/local_object_storage/blobstor/blobstor.go
+++ b/pkg/local_object_storage/blobstor/blobstor.go
@@ -1,6 +1,7 @@
package blobstor
import (
+ "context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -40,14 +41,14 @@ type SubStorageInfo struct {
type Option func(*cfg)
type cfg struct {
- compression compression.Config
+ compression compression.Compressor
log *logger.Logger
storage []SubStorage
metrics Metrics
}
func initConfig(c *cfg) {
- c.log = &logger.Logger{Logger: zap.L()}
+ c.log = logger.NewLoggerWrapper(zap.L())
c.metrics = &noopMetrics{}
}
@@ -90,56 +91,19 @@ func WithStorages(st []SubStorage) Option {
// WithLogger returns option to specify BlobStor's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
+ c.log = l
}
}
-// WithCompressObjects returns option to toggle
-// compression of the stored objects.
-//
-// If true, Zstandard algorithm is used for data compression.
-//
-// If compressor (decompressor) creation failed,
-// the uncompressed option will be used, and the error
-// is recorded in the provided log.
-func WithCompressObjects(comp bool) Option {
+func WithCompression(comp compression.Config) Option {
return func(c *cfg) {
- c.compression.Enabled = comp
- }
-}
-
-// WithCompressibilityEstimate returns an option to use
-// normilized compressibility estimate to decide compress
-// data or not.
-//
-// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
-func WithCompressibilityEstimate(v bool) Option {
- return func(c *cfg) {
- c.compression.UseCompressEstimation = v
- }
-}
-
-// WithCompressibilityEstimateThreshold returns an option to set
-// normilized compressibility estimate threshold.
-//
-// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
-func WithCompressibilityEstimateThreshold(threshold float64) Option {
- return func(c *cfg) {
- c.compression.CompressEstimationThreshold = threshold
- }
-}
-
-// WithUncompressableContentTypes returns option to disable decompression
-// for specific content types as seen by object.AttributeContentType attribute.
-func WithUncompressableContentTypes(values []string) Option {
- return func(c *cfg) {
- c.compression.UncompressableContentTypes = values
+ c.compression.Config = comp
}
}
// SetReportErrorFunc allows providing a function to be called on disk errors.
// This function MUST be called before Open.
-func (b *BlobStor) SetReportErrorFunc(f func(string, error)) {
+func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) {
for i := range b.storage {
b.storage[i].Storage.SetReportErrorFunc(f)
}
@@ -151,6 +115,6 @@ func WithMetrics(m Metrics) Option {
}
}
-func (b *BlobStor) Compressor() *compression.Config {
- return &b.cfg.compression
+func (b *BlobStor) Compressor() *compression.Compressor {
+ return &b.compression
}
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index bed5e0eb9..6ddeb6f00 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -51,10 +52,12 @@ func TestCompression(t *testing.T) {
newBlobStor := func(t *testing.T, compress bool) *BlobStor {
bs := New(
- WithCompressObjects(compress),
+ WithCompression(compression.Config{
+ Enabled: compress,
+ }),
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init())
+ require.NoError(t, bs.Init(context.Background()))
return bs
}
@@ -91,20 +94,20 @@ func TestCompression(t *testing.T) {
blobStor := newBlobStor(t, false)
testPut(t, blobStor, 0)
testGet(t, blobStor, 0)
- require.NoError(t, blobStor.Close())
+ require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, true)
testGet(t, blobStor, 0) // get uncompressed object with compress enabled
testPut(t, blobStor, 1)
testGet(t, blobStor, 1)
- require.NoError(t, blobStor.Close())
+ require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, false)
testGet(t, blobStor, 0) // get old uncompressed object
testGet(t, blobStor, 1) // get compressed object with compression disabled
testPut(t, blobStor, 2)
testGet(t, blobStor, 2)
- require.NoError(t, blobStor.Close())
+ require.NoError(t, blobStor.Close(context.Background()))
}
func TestBlobstor_needsCompression(t *testing.T) {
@@ -113,8 +116,10 @@ func TestBlobstor_needsCompression(t *testing.T) {
dir := t.TempDir()
bs := New(
- WithCompressObjects(compress),
- WithUncompressableContentTypes(ct),
+ WithCompression(compression.Config{
+ Enabled: compress,
+ UncompressableContentTypes: ct,
+ }),
WithStorages([]SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
@@ -130,7 +135,7 @@ func TestBlobstor_needsCompression(t *testing.T) {
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init())
+ require.NoError(t, bs.Init(context.Background()))
return bs
}
@@ -192,7 +197,7 @@ func TestConcurrentPut(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, blobStor.Init())
+ require.NoError(t, blobStor.Init(context.Background()))
testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)})
@@ -272,7 +277,7 @@ func TestConcurrentDelete(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, blobStor.Init())
+ require.NoError(t, blobStor.Init(context.Background()))
testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
var prm common.PutPrm
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 19e181ee7..788fe66f2 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -12,16 +12,27 @@ type RebuildRes struct {
}
type RebuildPrm struct {
- MetaStorage MetaStorage
- WorkerLimiter ConcurrentWorkersLimiter
- FillPercent int
+ MetaStorage MetaStorage
+ Limiter RebuildLimiter
+ FillPercent int
}
type MetaStorage interface {
UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
}
-type ConcurrentWorkersLimiter interface {
- AcquireWorkSlot(ctx context.Context) error
- ReleaseWorkSlot()
+type ReleaseFunc func()
+
+type ConcurrencyLimiter interface {
+ AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error)
+}
+
+type RateLimiter interface {
+ ReadRequest(context.Context) (ReleaseFunc, error)
+ WriteRequest(context.Context) (ReleaseFunc, error)
+}
+
+type RebuildLimiter interface {
+ ConcurrencyLimiter
+ RateLimiter
}
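One possible implementation of the ConcurrencyLimiter half of this contract, sketched as an assumption (a buffered channel used as a semaphore), shows why returning a ReleaseFunc is simpler for callers than the old paired AcquireWorkSlot/ReleaseWorkSlot methods:

type chanLimiter struct {
	slots chan struct{} // buffer capacity = maximum concurrent workers
}

func (l *chanLimiter) AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) {
	select {
	case l.slots <- struct{}{}:
		return func() { <-l.slots }, nil // release is bound to this acquisition
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}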
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index 4f3a20993..e35c35e60 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -12,18 +12,18 @@ import (
type Storage interface {
Open(mode mode.ComponentMode) error
Init() error
- Close() error
+ Close(context.Context) error
Type() string
Path() string
ObjectsCount(ctx context.Context) (uint64, error)
- SetCompressor(cc *compression.Config)
- Compressor() *compression.Config
+ SetCompressor(cc *compression.Compressor)
+ Compressor() *compression.Compressor
// SetReportErrorFunc allows providing a function to be called on disk errors.
// This function MUST be called before Open.
- SetReportErrorFunc(f func(string, error))
+ SetReportErrorFunc(f func(context.Context, string, error))
SetParentID(parentID string)
Get(context.Context, GetPrm) (GetRes, error)
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 9f70f8ec2..445a0494b 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -11,7 +11,7 @@ import (
)
func BenchmarkCompression(b *testing.B) {
- c := Config{Enabled: true}
+ c := Compressor{Config: Config{Enabled: true}}
require.NoError(b, c.Init())
for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} {
@@ -33,7 +33,7 @@ func BenchmarkCompression(b *testing.B) {
}
}
-func benchWith(b *testing.B, c Config, data []byte) {
+func benchWith(b *testing.B, c Compressor, data []byte) {
b.ResetTimer()
b.ReportAllocs()
for range b.N {
@@ -56,8 +56,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
b.Run("estimate", func(b *testing.B) {
b.ResetTimer()
- c := &Config{
- Enabled: true,
+ c := &Compressor{
+ Config: Config{
+ Enabled: true,
+ },
}
require.NoError(b, c.Init())
@@ -76,8 +78,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
b.Run("compress", func(b *testing.B) {
b.ResetTimer()
- c := &Config{
- Enabled: true,
+ c := &Compressor{
+ Config: Config{
+ Enabled: true,
+ },
}
require.NoError(b, c.Init())
diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go
index 85ab47692..c76cec9a1 100644
--- a/pkg/local_object_storage/blobstor/compression/compress.go
+++ b/pkg/local_object_storage/blobstor/compression/compress.go
@@ -4,21 +4,36 @@ import (
"bytes"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/klauspost/compress"
"github.com/klauspost/compress/zstd"
)
+type Level string
+
+const (
+ LevelDefault Level = ""
+ LevelOptimal Level = "optimal"
+ LevelFastest Level = "fastest"
+ LevelSmallestSize Level = "smallest_size"
+)
+
+type Compressor struct {
+ Config
+
+ encoder *zstd.Encoder
+ decoder *zstd.Decoder
+}
+
// Config represents common compression-related configuration.
type Config struct {
Enabled bool
UncompressableContentTypes []string
+ Level Level
- UseCompressEstimation bool
- CompressEstimationThreshold float64
-
- encoder *zstd.Encoder
- decoder *zstd.Decoder
+ EstimateCompressibility bool
+ EstimateCompressibilityThreshold float64
}
// zstdFrameMagic contains first 4 bytes of any compressed object
@@ -26,11 +41,11 @@ type Config struct {
var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
// Init initializes compression routines.
-func (c *Config) Init() error {
+func (c *Compressor) Init() error {
var err error
if c.Enabled {
- c.encoder, err = zstd.NewWriter(nil)
+ c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel()))
if err != nil {
return err
}
@@ -73,7 +88,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool {
// Decompress decompresses data if it starts with the magic
// and returns data untouched otherwise.
-func (c *Config) Decompress(data []byte) ([]byte, error) {
+func (c *Compressor) Decompress(data []byte) ([]byte, error) {
if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) {
return data, nil
}
@@ -82,13 +97,13 @@ func (c *Config) Decompress(data []byte) ([]byte, error) {
// Compress compresses data if compression is enabled
// and returns data untouched otherwise.
-func (c *Config) Compress(data []byte) []byte {
+func (c *Compressor) Compress(data []byte) []byte {
if c == nil || !c.Enabled {
return data
}
- if c.UseCompressEstimation {
+ if c.EstimateCompressibility {
estimated := compress.Estimate(data)
- if estimated >= c.CompressEstimationThreshold {
+ if estimated >= c.EstimateCompressibilityThreshold {
return c.compress(data)
}
return data
@@ -96,7 +111,7 @@ func (c *Config) Compress(data []byte) []byte {
return c.compress(data)
}
-func (c *Config) compress(data []byte) []byte {
+func (c *Compressor) compress(data []byte) []byte {
maxSize := c.encoder.MaxEncodedSize(len(data))
compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
if len(data) < len(compressed) {
@@ -106,7 +121,7 @@ func (c *Config) compress(data []byte) []byte {
}
// Close closes encoder and decoder, returning any error that occurred.
-func (c *Config) Close() error {
+func (c *Compressor) Close() error {
var err error
if c.encoder != nil {
err = c.encoder.Close()
@@ -116,3 +131,24 @@ func (c *Config) Close() error {
}
return err
}
+
+func (c *Config) HasValidCompressionLevel() bool {
+ return c.Level == LevelDefault ||
+ c.Level == LevelOptimal ||
+ c.Level == LevelFastest ||
+ c.Level == LevelSmallestSize
+}
+
+func (c *Compressor) compressionLevel() zstd.EncoderLevel {
+ switch c.Level {
+ case LevelDefault, LevelOptimal:
+ return zstd.SpeedDefault
+ case LevelFastest:
+ return zstd.SpeedFastest
+ case LevelSmallestSize:
+ return zstd.SpeedBestCompression
+ default:
+ assert.Fail("unknown compression level", string(c.Level))
+ return zstd.SpeedDefault
+ }
+}
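A short round-trip sketch of the new Config/Compressor split, mirroring the bench tests above (payload is a placeholder []byte):

c := &Compressor{Config: Config{
	Enabled: true,
	Level:   LevelSmallestSize, // mapped to zstd.SpeedBestCompression by compressionLevel()
}}
if err := c.Init(); err != nil {
	return err
}
compressed := c.Compress(payload)         // returns payload unchanged if compression is disabled
restored, err := c.Decompress(compressed) // returns input unchanged unless it starts with the zstd magic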
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 9b414a9be..0418eedd0 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -6,13 +6,14 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"go.uber.org/zap"
)
// Open opens BlobStor.
func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
- b.log.Debug(logs.BlobstorOpening)
+ b.log.Debug(ctx, logs.BlobstorOpening)
b.modeMtx.Lock()
defer b.modeMtx.Unlock()
@@ -50,9 +51,13 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
// If BlobStor is already initialized, no action is taken.
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.
-func (b *BlobStor) Init() error {
- b.log.Debug(logs.BlobstorInitializing)
+func (b *BlobStor) Init(ctx context.Context) error {
+ b.log.Debug(ctx, logs.BlobstorInitializing)
+ if !b.compression.HasValidCompressionLevel() {
+ b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level)))
+ b.compression.Level = compression.LevelDefault
+ }
if err := b.compression.Init(); err != nil {
return err
}
@@ -67,14 +72,14 @@ func (b *BlobStor) Init() error {
}
// Close releases all internal resources of BlobStor.
-func (b *BlobStor) Close() error {
- b.log.Debug(logs.BlobstorClosing)
+func (b *BlobStor) Close(ctx context.Context) error {
+ b.log.Debug(ctx, logs.BlobstorClosing)
var firstErr error
for i := range b.storage {
- err := b.storage[i].Storage.Close()
+ err := b.storage[i].Storage.Close(ctx)
if err != nil {
- b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
+ b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
if firstErr == nil {
firstErr = err
}
diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go
index c91508e6d..86d8f15e3 100644
--- a/pkg/local_object_storage/blobstor/delete.go
+++ b/pkg/local_object_storage/blobstor/delete.go
@@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
if err == nil || !client.IsErrObjectNotFound(err) {
if err == nil {
success = true
- logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
+ logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
}
return res, err
}
@@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
res, err := st.Delete(ctx, prm)
if err == nil {
success = true
- logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
+ logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}
return res, err
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index 43feec7c9..c155e15b8 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -73,10 +72,9 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
}
for _, err := range errors[:len(errors)-1] {
- b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
return common.ExistsRes{}, errors[len(errors)-1]
diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go
index 783c198b2..7eb7d49bf 100644
--- a/pkg/local_object_storage/blobstor/exists_test.go
+++ b/pkg/local_object_storage/blobstor/exists_test.go
@@ -22,7 +22,7 @@ func TestExists(t *testing.T) {
b := New(WithStorages(storages))
require.NoError(t, b.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, b.Init())
+ require.NoError(t, b.Init(context.Background()))
objects := []*objectSDK.Object{
testObject(smallSizeLimit / 2),
diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go
index c21d79f09..2544729f7 100644
--- a/pkg/local_object_storage/blobstor/fstree/control.go
+++ b/pkg/local_object_storage/blobstor/fstree/control.go
@@ -1,6 +1,8 @@
package fstree
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)
@@ -28,7 +30,7 @@ func (t *FSTree) Init() error {
}
// Close implements common.Storage.
-func (t *FSTree) Close() error {
+func (t *FSTree) Close(_ context.Context) error {
t.metrics.Close()
return nil
}
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
index b5dbc9e40..3caee7ee1 100644
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -2,6 +2,8 @@ package fstree
import (
"sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
// FileCounter used to count files in FSTree. The implementation must be thread-safe.
@@ -52,16 +54,11 @@ func (c *SimpleCounter) Dec(size uint64) {
c.mtx.Lock()
defer c.mtx.Unlock()
- if c.count > 0 {
- c.count--
- } else {
- panic("fstree.SimpleCounter: invalid count")
- }
- if c.size >= size {
- c.size -= size
- } else {
- panic("fstree.SimpleCounter: invalid size")
- }
+ assert.True(c.count > 0, "fstree.SimpleCounter: invalid count")
+ c.count--
+
+ assert.True(c.size >= size, "fstree.SimpleCounter: invalid size")
+ c.size -= size
}
func (c *SimpleCounter) CountSize() (uint64, uint64) {
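The internal/assert package referenced here is not shown in this patch; a plausible minimal form of the helper, stated purely as an assumption, keeps the old fail-fast panic behavior:

// Hypothetical sketch of internal/assert.True as used by SimpleCounter.Dec.
func True(cond bool, details ...string) {
	if !cond {
		panic(strings.Join(details, " "))
	}
}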
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 057796db2..112741ab4 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -45,7 +45,7 @@ type FSTree struct {
log *logger.Logger
- *compression.Config
+ compressor *compression.Compressor
Depth uint64
DirNameLen int
@@ -82,12 +82,12 @@ func New(opts ...Option) *FSTree {
Permissions: 0o700,
RootPath: "./",
},
- Config: nil,
+ compressor: nil,
Depth: 4,
DirNameLen: DirNameLen,
metrics: &noopMetrics{},
fileCounter: &noopCounter{},
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
}
for i := range opts {
opts[i](f)
@@ -152,8 +152,8 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
des, err := os.ReadDir(dirPath)
if err != nil {
if prm.IgnoreErrors {
- t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
- zap.String("err", err.Error()),
+ t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.Error(err),
zap.String("directory_path", dirPath))
return nil
}
@@ -196,13 +196,13 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
}
if err == nil {
- data, err = t.Decompress(data)
+ data, err = t.compressor.Decompress(data)
}
if err != nil {
if prm.IgnoreErrors {
- t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+ t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", addr),
- zap.String("err", err.Error()),
+ zap.Error(err),
zap.String("path", path))
continue
}
@@ -405,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err
return common.PutRes{}, err
}
if !prm.DontCompress {
- prm.RawData = t.Compress(prm.RawData)
+ prm.RawData = t.compressor.Compress(prm.RawData)
}
size = len(prm.RawData)
@@ -448,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err
}
}
- data, err = t.Decompress(data)
+ data, err = t.compressor.Decompress(data)
if err != nil {
return common.GetRes{}, err
}
@@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
},
)
if err != nil {
- return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+ return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
}
return count, size, nil
@@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
},
)
if err != nil {
- return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+ return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
}
success = true
return result, nil
@@ -597,16 +597,16 @@ func (t *FSTree) Path() string {
}
// SetCompressor implements common.Storage.
-func (t *FSTree) SetCompressor(cc *compression.Config) {
- t.Config = cc
+func (t *FSTree) SetCompressor(cc *compression.Compressor) {
+ t.compressor = cc
}
-func (t *FSTree) Compressor() *compression.Config {
- return t.Config
+func (t *FSTree) Compressor() *compression.Compressor {
+ return t.compressor
}
// SetReportErrorFunc implements common.Storage.
-func (t *FSTree) SetReportErrorFunc(_ func(string, error)) {
+func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) {
// Do nothing, FSTree can encounter only one error which is returned.
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index eb2126b6c..50dae46a7 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -52,7 +52,7 @@ func TestObjectCounter(t *testing.T) {
require.Equal(t, uint64(0), size)
defer func() {
- require.NoError(t, fst.Close())
+ require.NoError(t, fst.Close(context.Background()))
}()
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 4110ba7d7..6d633dad6 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -67,12 +67,9 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
err := w.writeFile(tmpPath, data)
if err != nil {
var pe *fs.PathError
- if errors.As(err, &pe) {
- switch pe.Err {
- case syscall.ENOSPC:
- err = common.ErrNoSpace
- _ = os.RemoveAll(tmpPath)
- }
+ if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) {
+ err = common.ErrNoSpace
+ _ = os.RemoveAll(tmpPath)
}
return err
}
@@ -136,6 +133,6 @@ func (w *genericWriter) removeWithCounter(p string, size uint64) error {
if err := os.Remove(p); err != nil {
return err
}
- w.fileCounter.Dec(uint64(size))
+ w.fileCounter.Dec(size)
return nil
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
index 3561c616b..49cbda344 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -69,10 +69,13 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if err != nil {
return err
}
+ written := 0
tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10)
n, err := unix.Write(fd, data)
- if err == nil {
- if n == len(data) {
+ for err == nil {
+ written += n
+
+ if written == len(data) {
err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
if err == nil {
w.fileCounter.Inc(uint64(len(data)))
@@ -80,9 +83,23 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if errors.Is(err, unix.EEXIST) {
err = nil
}
- } else {
- err = errors.New("incomplete write")
+ break
}
+
+ // From man 2 write:
+ // https://www.man7.org/linux/man-pages/man2/write.2.html
+ //
+ // Note that a successful write() may transfer fewer than count
+ // bytes. Such partial writes can occur for various reasons; for
+ // example, because there was insufficient space on the disk device
+ // to write all of the requested bytes, or because a blocked write()
+ // to a socket, pipe, or similar was interrupted by a signal handler
+ // after it had transferred some, but before it had transferred all
+ // of the requested bytes. In the event of a partial write, the
+ // caller can make another write() call to transfer the remaining
+ // bytes. The subsequent call will either transfer further bytes or
+ // may result in an error (e.g., if the disk is now full).
+ n, err = unix.Write(fd, data[written:])
}
errClose := unix.Close(fd)
if err != nil {
@@ -114,7 +131,7 @@ func (w *linuxWriter) removeFile(p string, size uint64) error {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if err == nil {
- w.fileCounter.Dec(uint64(size))
+ w.fileCounter.Dec(size)
}
return err
}
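Distilled, the new write path above is the classic full-write loop (a sketch under the same unix.Write semantics; the Linkat and file-counter steps are omitted):

written := 0
for written < len(data) {
	n, err := unix.Write(fd, data[written:]) // may transfer fewer bytes than requested
	if err != nil {
		return err // e.g. ENOSPC once the device fills up
	}
	written += n
}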
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
new file mode 100644
index 000000000..7fae2e695
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
@@ -0,0 +1,42 @@
+//go:build linux && integration
+
+package fstree
+
+import (
+ "context"
+ "errors"
+ "os"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sys/unix"
+)
+
+func TestENOSPC(t *testing.T) {
+ dir, err := os.MkdirTemp(t.TempDir(), "ramdisk")
+ require.NoError(t, err)
+
+ f, err := os.CreateTemp(t.TempDir(), "ramdisk_*")
+ require.NoError(t, err)
+
+ err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M")
+ if errors.Is(err, unix.EPERM) {
+ t.Skipf("skip size tests: no permission to mount: %v", err)
+ return
+ }
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, unix.Unmount(dir, 0))
+ }()
+
+ fst := New(WithPath(dir), WithDepth(1))
+ require.NoError(t, fst.Open(mode.ComponentReadWrite))
+ require.NoError(t, fst.Init())
+
+ _, err = fst.Put(context.Background(), common.PutPrm{
+ RawData: make([]byte, 10<<20),
+ })
+ require.ErrorIs(t, err, common.ErrNoSpace)
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go
index 4d1f8fc22..6f2ac87e1 100644
--- a/pkg/local_object_storage/blobstor/fstree/option.go
+++ b/pkg/local_object_storage/blobstor/fstree/option.go
@@ -4,7 +4,6 @@ import (
"io/fs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
type Option func(*FSTree)
@@ -53,6 +52,6 @@ func WithFileCounter(c FileCounter) Option {
func WithLogger(l *logger.Logger) Option {
return func(f *FSTree) {
- f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))}
+ f.log = l
}
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
index 21c80b089..b8e88f84a 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
@@ -19,7 +19,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
require.NoError(t, s.Init())
objects := prepare(t, 10, s, minSize, maxSize)
- require.NoError(t, s.Close())
+ require.NoError(t, s.Close(context.Background()))
require.NoError(t, s.Open(mode.ComponentReadOnly))
for i := range objects {
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
index cf4e76513..3a163f6b1 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
@@ -15,7 +15,7 @@ func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 4, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
index 08465ed5e..f34fe5f97 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
@@ -14,7 +14,7 @@ func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 1, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
index d1f709b0c..af0f4b45d 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
@@ -15,7 +15,7 @@ func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 2, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
index fcbeddac7..13032048c 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
@@ -17,7 +17,7 @@ func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 1, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index 3a6c8b699..d54c54f59 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -3,6 +3,7 @@ package blobstortest
import (
"context"
"errors"
+ "slices"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -14,7 +15,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 10, s, minSize, maxSize)
@@ -26,7 +27,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
_, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)
- objects = append(objects[:delID], objects[delID+1:]...)
+ objects = slices.Delete(objects, delID, delID+1)
runTestNormalHandler(t, s, objects)
@@ -49,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc)
_, err := s.Iterate(context.Background(), iterPrm)
require.NoError(t, err)
- require.Equal(t, len(objects), len(seen))
+ require.Len(t, objects, len(seen))
for i := range objects {
d, ok := seen[objects[i].addr.String()]
require.True(t, ok)
diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go
index f213d7547..ff1aa9d64 100644
--- a/pkg/local_object_storage/blobstor/iterate.go
+++ b/pkg/local_object_storage/blobstor/iterate.go
@@ -42,10 +42,10 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
_, err := b.storage[i].Storage.Iterate(ctx, prm)
if err != nil {
if prm.IgnoreErrors {
- b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("storage_path", b.storage[i].Storage.Path()),
zap.String("storage_type", b.storage[i].Storage.Type()),
- zap.String("err", err.Error()))
+ zap.Error(err))
continue
}
return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index 079728380..2786321a8 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -3,10 +3,14 @@ package blobstor
import (
"context"
"encoding/binary"
+ "errors"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -21,7 +25,9 @@ func TestIterateObjects(t *testing.T) {
// create BlobStor instance
blobStor := New(
WithStorages(defaultStorages(p, smalSz)),
- WithCompressObjects(true),
+ WithCompression(compression.Config{
+ Enabled: true,
+ }),
)
defer os.RemoveAll(p)
@@ -30,9 +36,9 @@ func TestIterateObjects(t *testing.T) {
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
// initialize Blobstor
- require.NoError(t, blobStor.Init())
+ require.NoError(t, blobStor.Init(context.Background()))
- defer blobStor.Close()
+ defer blobStor.Close(context.Background())
const objNum = 5
@@ -44,7 +50,7 @@ func TestIterateObjects(t *testing.T) {
mObjs := make(map[string]addrData)
- for i := uint64(0); i < objNum; i++ {
+ for i := range uint64(objNum) {
sz := smalSz
big := i < objNum/2
@@ -90,117 +96,60 @@ func TestIterateObjects(t *testing.T) {
}
func TestIterate_IgnoreErrors(t *testing.T) {
- t.Skip()
- // dir := t.TempDir()
- //
- // const (
- // smallSize = 512
- // objCount = 5
- // )
- // bsOpts := []Option{
- // WithCompressObjects(true),
- // WithRootPath(dir),
- // WithSmallSizeLimit(smallSize * 2), // + header
- // WithBlobovniczaOpenedCacheSize(1),
- // WithBlobovniczaShallowWidth(1),
- // WithBlobovniczaShallowDepth(1)}
- // bs := New(bsOpts...)
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // addrs := make([]oid.Address, objCount)
- // for i := range addrs {
- // addrs[i] = oidtest.Address()
- //
- // obj := object.New()
- // obj.SetContainerID(addrs[i].Container())
- // obj.SetID(addrs[i].Object())
- // obj.SetPayload(make([]byte, smallSize<<(i%2)))
- //
- // objData, err := obj.Marshal()
- // require.NoError(t, err)
- //
- // _, err = bs.PutRaw(addrs[i], objData, true)
- // require.NoError(t, err)
- // }
- //
- // // Construct corrupted compressed object.
- // buf := bytes.NewBuffer(nil)
- // badObject := make([]byte, smallSize/2+1)
- // enc, err := zstd.NewWriter(buf)
- // require.NoError(t, err)
- // rawData := enc.EncodeAll(badObject, nil)
- // for i := 4; /* magic size */ i < len(rawData); i += 2 {
- // rawData[i] ^= 0xFF
- // }
- // // Will be put uncompressed but fetched as compressed because of magic.
- // _, err = bs.PutRaw(oidtest.Address(), rawData, false)
- // require.NoError(t, err)
- // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData))
- //
- // require.NoError(t, bs.Close())
- //
- // // Increase width to have blobovnicza which is definitely empty.
- // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...)
- // require.NoError(t, b.Open(false))
- // require.NoError(t, b.Init())
- //
- // var p string
- // for i := 0; i < 2; i++ {
- // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10))
- // if _, ok := bs.blobovniczas.opened.Get(bp); !ok {
- // p = bp
- // break
- // }
- // }
- // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache")
- // require.NoError(t, os.Chmod(p, 0))
- //
- // require.NoError(t, b.Close())
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // var prm IteratePrm
- // prm.SetIterationHandler(func(e IterationElement) error {
- // return nil
- // })
- // _, err = bs.Iterate(prm)
- // require.Error(t, err)
- //
- // prm.IgnoreErrors()
- //
- // t.Run("skip invalid objects", func(t *testing.T) {
- // actual := make([]oid.Address, 0, len(addrs))
- // prm.SetIterationHandler(func(e IterationElement) error {
- // obj := object.New()
- // err := obj.Unmarshal(e.data)
- // if err != nil {
- // return err
- // }
- //
- // var addr oid.Address
- // cnr, _ := obj.ContainerID()
- // addr.SetContainer(cnr)
- // id, _ := obj.ID()
- // addr.SetObject(id)
- // actual = append(actual, addr)
- // return nil
- // })
- //
- // _, err := bs.Iterate(prm)
- // require.NoError(t, err)
- // require.ElementsMatch(t, addrs, actual)
- // })
- // t.Run("return errors from handler", func(t *testing.T) {
- // n := 0
- // expectedErr := errors.New("expected error")
- // prm.SetIterationHandler(func(e IterationElement) error {
- // if n++; n == objCount/2 {
- // return expectedErr
- // }
- // return nil
- // })
- // _, err := bs.Iterate(prm)
- // require.ErrorIs(t, err, expectedErr)
- // })
+ ctx := context.Background()
+
+ myErr := errors.New("unique error")
+ nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil }
+ panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") }
+ errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr }
+
+ var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error)
+ st1 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s1iter(prm)
+ }))
+ st2 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s2iter(prm)
+ }))
+
+ bsOpts := []Option{WithStorages([]SubStorage{
+ {Storage: st1},
+ {Storage: st2},
+ })}
+ bs := New(bsOpts...)
+ require.NoError(t, bs.Open(ctx, mode.ReadWrite))
+ require.NoError(t, bs.Init(ctx))
+
+ nopHandler := func(e common.IterationElement) error {
+ return nil
+ }
+
+ t.Run("no errors", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = panicIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.ErrorIs(t, err, myErr)
+ })
+
+ t.Run("ignore errors, storage 1", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("ignore errors, storage 2", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = errIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
}
diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go
index 7e057a0e3..070b1eac9 100644
--- a/pkg/local_object_storage/blobstor/logger.go
+++ b/pkg/local_object_storage/blobstor/logger.go
@@ -1,6 +1,8 @@
package blobstor
import (
+ "context"
+
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -11,8 +13,8 @@ const (
putOp = "PUT"
)
-func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
- storagelog.Write(l,
+func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
+ storagelog.Write(ctx, l,
storagelog.AddressField(addr),
storagelog.OpField(op),
storagelog.StorageTypeField(typ),
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go
index 449d4352a..3df96a1c3 100644
--- a/pkg/local_object_storage/blobstor/memstore/control.go
+++ b/pkg/local_object_storage/blobstor/memstore/control.go
@@ -1,6 +1,8 @@
package memstore
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
@@ -10,11 +12,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error {
return nil
}
-func (s *memstoreImpl) Init() error { return nil }
-func (s *memstoreImpl) Close() error { return nil }
-func (s *memstoreImpl) Type() string { return Type }
-func (s *memstoreImpl) Path() string { return s.rootPath }
-func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
-func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
-func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f }
-func (s *memstoreImpl) SetParentID(string) {}
+func (s *memstoreImpl) Init() error { return nil }
+func (s *memstoreImpl) Close(context.Context) error { return nil }
+func (s *memstoreImpl) Type() string { return Type }
+func (s *memstoreImpl) Path() string { return s.rootPath }
+func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc }
+func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression }
+func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {}
+func (s *memstoreImpl) SetParentID(string) {}
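
A hedged lifecycle sketch for the updated memstore contract — Close is now context-aware and the compression accessors use *compression.Compressor instead of the former *compression.Config (import paths as used elsewhere in this diff):

    package example

    import (
        "context"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
    )

    func useMemstore(ctx context.Context) error {
        s := memstore.New(memstore.WithRootPath("memstore"))
        if err := s.Open(mode.ComponentReadWrite); err != nil {
            return err
        }
        if err := s.Init(); err != nil {
            return err
        }
        return s.Close(ctx) // context-aware since this change
    }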
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go
index 0252c7983..7ef7e37a4 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore.go
@@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
// Decompress the data.
var err error
if data, err = s.compression.Decompress(data); err != nil {
- return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
}
// Unmarshal the SDK object.
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
@@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common
elem := common.IterationElement{
ObjectData: v,
}
- if err := elem.Address.DecodeString(string(k)); err != nil {
+ if err := elem.Address.DecodeString(k); err != nil {
if req.IgnoreErrors {
continue
}
- return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err))
+ return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err))
}
var err error
if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil {
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index 8d1480dff..f904d4232 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
@@ -16,9 +15,8 @@ import (
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
- WithLogger(test.NewLogger(t)),
)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go
index 3d67b1e9c..7605af4e5 100644
--- a/pkg/local_object_storage/blobstor/memstore/option.go
+++ b/pkg/local_object_storage/blobstor/memstore/option.go
@@ -2,33 +2,20 @@ package memstore
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
type cfg struct {
- log *logger.Logger
rootPath string
readOnly bool
- compression *compression.Config
- reportError func(string, error)
+ compression *compression.Compressor
}
func defaultConfig() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- reportError: func(string, error) {},
- }
+ return &cfg{}
}
type Option func(*cfg)
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
-
func WithRootPath(p string) Option {
return func(c *cfg) {
c.rootPath = p
diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go
index a579a6f92..80268fa7a 100644
--- a/pkg/local_object_storage/blobstor/mode.go
+++ b/pkg/local_object_storage/blobstor/mode.go
@@ -8,7 +8,7 @@ import (
)
// SetMode sets the blobstor mode of operation.
-func (b *BlobStor) SetMode(m mode.Mode) error {
+func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
b.modeMtx.Lock()
defer b.modeMtx.Unlock()
@@ -20,14 +20,14 @@ func (b *BlobStor) SetMode(m mode.Mode) error {
return nil
}
- err := b.Close()
+ err := b.Close(ctx)
if err == nil {
- if err = b.openBlobStor(context.TODO(), m); err == nil {
- err = b.Init()
+ if err = b.openBlobStor(ctx, m); err == nil {
+ err = b.Init(ctx)
}
}
if err != nil {
- return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
+ return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
}
b.mode = m
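
Mode switches now reuse the caller's context for the internal Close/open/Init sequence instead of context.TODO(). A usage sketch, with b an opened *BlobStor:

    if err := b.SetMode(ctx, mode.ReadOnly); err != nil {
        return fmt.Errorf("move blobstor to read-only: %w", err)
    }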
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 1ac769e36..64e3c8da1 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -106,7 +106,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close()) }()
+ defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
var errG errgroup.Group
@@ -161,7 +161,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
gen := genEntry.create()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close()) }()
+ defer func() { require.NoError(b, st.Close(context.Background())) }()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
@@ -200,7 +200,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close()) }()
+ defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
for range tt.size {
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
index 1adae303d..fe9c109dd 100644
--- a/pkg/local_object_storage/blobstor/put.go
+++ b/pkg/local_object_storage/blobstor/put.go
@@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
// marshal object
data, err := prm.Object.Marshal()
if err != nil {
- return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
+ return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
}
prm.RawData = data
}
@@ -63,7 +63,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
success = true
- logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
+ logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}
return res, err
}
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index 7b2786ba2..f28816555 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -13,24 +13,19 @@ type StorageIDUpdate interface {
UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
}
-type ConcurrentWorkersLimiter interface {
- AcquireWorkSlot(ctx context.Context) error
- ReleaseWorkSlot()
-}
-
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error {
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error {
var summary common.RebuildRes
var rErr error
for _, storage := range b.storage {
res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
- MetaStorage: upd,
- WorkerLimiter: limiter,
- FillPercent: fillPercent,
+ MetaStorage: upd,
+ Limiter: concLimiter,
+ FillPercent: fillPercent,
})
summary.FilesRemoved += res.FilesRemoved
summary.ObjectsMoved += res.ObjectsMoved
if err != nil {
- b.log.Error(logs.BlobstorRebuildFailedToRebuildStorages,
+ b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages,
zap.String("failed_storage_path", storage.Storage.Path()),
zap.String("failed_storage_type", storage.Storage.Type()),
zap.Error(err))
@@ -38,7 +33,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con
break
}
}
- b.log.Info(logs.BlobstorRebuildRebuildStoragesCompleted,
+ b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted,
zap.Bool("success", rErr == nil),
zap.Uint64("total_files_removed", summary.FilesRemoved),
zap.Uint64("total_objects_moved", summary.ObjectsMoved))
diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go
index bc0bed49d..3a38ecf82 100644
--- a/pkg/local_object_storage/blobstor/teststore/option.go
+++ b/pkg/local_object_storage/blobstor/teststore/option.go
@@ -1,6 +1,8 @@
package teststore
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -15,9 +17,9 @@ type cfg struct {
Type func() string
Path func() string
- SetCompressor func(cc *compression.Config)
- Compressor func() *compression.Config
- SetReportErrorFunc func(f func(string, error))
+ SetCompressor func(cc *compression.Compressor)
+ Compressor func() *compression.Compressor
+ SetReportErrorFunc func(f func(context.Context, string, error))
Get func(common.GetPrm) (common.GetRes, error)
GetRange func(common.GetRangePrm) (common.GetRangeRes, error)
@@ -43,15 +45,15 @@ func WithClose(f func() error) Option { return func(c *cfg) { c
func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } }
func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } }
-func WithSetCompressor(f func(*compression.Config)) Option {
+func WithSetCompressor(f func(*compression.Compressor)) Option {
return func(c *cfg) { c.overrides.SetCompressor = f }
}
-func WithCompressor(f func() *compression.Config) Option {
+func WithCompressor(f func() *compression.Compressor) Option {
return func(c *cfg) { c.overrides.Compressor = f }
}
-func WithReportErrorFunc(f func(func(string, error))) Option {
+func WithReportErrorFunc(f func(func(context.Context, string, error))) Option {
return func(c *cfg) { c.overrides.SetReportErrorFunc = f }
}
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
index fea4a2d49..190b6a876 100644
--- a/pkg/local_object_storage/blobstor/teststore/teststore.go
+++ b/pkg/local_object_storage/blobstor/teststore/teststore.go
@@ -77,14 +77,14 @@ func (s *TestStore) Init() error {
}
}
-func (s *TestStore) Close() error {
+func (s *TestStore) Close(ctx context.Context) error {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Close != nil:
return s.overrides.Close()
case s.st != nil:
- return s.st.Close()
+ return s.st.Close(ctx)
default:
panic("unexpected storage call: Close()")
}
@@ -116,7 +116,7 @@ func (s *TestStore) Path() string {
}
}
-func (s *TestStore) SetCompressor(cc *compression.Config) {
+func (s *TestStore) SetCompressor(cc *compression.Compressor) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
@@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Config) {
}
}
-func (s *TestStore) Compressor() *compression.Config {
+func (s *TestStore) Compressor() *compression.Compressor {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
@@ -142,7 +142,7 @@ func (s *TestStore) Compressor() *compression.Config {
}
}
-func (s *TestStore) SetReportErrorFunc(f func(string, error)) {
+func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
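
The report-error override hook is context-aware as well now. A sketch of capturing the callback installed by code under test (New is the package constructor, assumed here; captured is illustrative):

    func newCapturingStore() (*teststore.TestStore, *func(context.Context, string, error)) {
        var captured func(context.Context, string, error)
        ts := teststore.New(teststore.WithReportErrorFunc(func(f func(context.Context, string, error)) {
            captured = f // remember the engine-installed callback for later assertions
        }))
        return ts, &captured
    }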
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index e45f502ac..e0617a832 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -44,22 +44,25 @@ func (r ListContainersRes) Containers() []cid.ID {
// ContainerSize returns the sum of estimation container sizes among all shards.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
+func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) {
+ defer elapsed("ContainerSize", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.containerSize(prm)
- return err
+ var csErr error
+ res, csErr = e.containerSize(ctx, prm)
+ return csErr
})
return
}
// ContainerSize calls ContainerSize method on engine to calculate sum of estimation container sizes among all shards.
-func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
+func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) {
var prm ContainerSizePrm
prm.SetContainerID(id)
- res, err := e.ContainerSize(prm)
+ res, err := e.ContainerSize(ctx, prm)
if err != nil {
return 0, err
}
@@ -67,18 +70,15 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
return res.Size(), nil
}
-func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
- if e.metrics != nil {
- defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)()
- }
-
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
+ var res ContainerSizeRes
+ err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
var csPrm shard.ContainerSizePrm
csPrm.SetContainerID(prm.cnr)
- csRes, err := sh.Shard.ContainerSize(csPrm)
+ csRes, err := sh.ContainerSize(ctx, csPrm)
if err != nil {
- e.reportShardError(sh, "can't get container size", err,
+ e.reportShardError(ctx, sh, "can't get container size", err,
zap.Stringer("container_id", prm.cnr))
return false
}
@@ -88,16 +88,19 @@ func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRe
return false
})
- return
+ return res, err
}
// ListContainers returns a unique container IDs presented in the engine objects.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) (res ListContainersRes, err error) {
+ defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.listContainers(ctx)
- return err
+ var lcErr error
+ res, lcErr = e.listContainers(ctx)
+ return lcErr
})
return
@@ -116,16 +119,12 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
}
func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
- if e.metrics != nil {
- defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
- }
-
uniqueIDs := make(map[string]cid.ID)
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
if err != nil {
- e.reportShardError(sh, "can't get list of containers", err)
+ e.reportShardError(ctx, sh, "can't get list of containers", err)
return false
}
@@ -137,7 +136,9 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes,
}
return false
- })
+ }); err != nil {
+ return ListContainersRes{}, err
+ }
result := make([]cid.ID, 0, len(uniqueIDs))
for _, v := range uniqueIDs {
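
The size-estimation entry points are now context-aware and report their duration via the engine metrics. A minimal sketch of the package-level helper (e and id stand in for a real engine and container ID):

    size, err := engine.ContainerSize(ctx, e, id)
    if err != nil {
        return fmt.Errorf("estimate container size: %w", err)
    }
    // size is the sum of per-shard estimations for the container.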
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 4778cf539..bf1649f6e 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -22,10 +22,6 @@ type shardInitError struct {
// Open opens all StorageEngine's components.
func (e *StorageEngine) Open(ctx context.Context) error {
- return e.open(ctx)
-}
-
-func (e *StorageEngine) open(ctx context.Context) error {
e.mtx.Lock()
defer e.mtx.Unlock()
@@ -49,16 +45,16 @@ func (e *StorageEngine) open(ctx context.Context) error {
for res := range errCh {
if res.err != nil {
- e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
+ e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close()
+ err := sh.Close(ctx)
if err != nil {
- e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
+ e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
@@ -77,7 +73,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
errCh := make(chan shardInitError, len(e.shards))
var eg errgroup.Group
- if e.cfg.lowMem && e.anyShardRequiresRefill() {
+ if e.lowMem && e.anyShardRequiresRefill() {
eg.SetLimit(1)
}
@@ -95,29 +91,29 @@ func (e *StorageEngine) Init(ctx context.Context) error {
err := eg.Wait()
close(errCh)
if err != nil {
- return fmt.Errorf("failed to initialize shards: %w", err)
+ return fmt.Errorf("initialize shards: %w", err)
}
for res := range errCh {
if res.err != nil {
if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
- e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
+ e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close()
+ err := sh.Close(ctx)
if err != nil {
- e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
+ e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
continue
}
- return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
+ return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
}
}
@@ -126,7 +122,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
}
e.wg.Add(1)
- go e.setModeLoop()
+ go e.setModeLoop(ctx)
return nil
}
@@ -149,25 +145,19 @@ var errClosed = errors.New("storage engine is closed")
func (e *StorageEngine) Close(ctx context.Context) error {
close(e.closeCh)
defer e.wg.Wait()
- return e.setBlockExecErr(ctx, errClosed)
+ return e.closeEngine(ctx)
}
// closes all shards. Never returns an error, shard errors are logged.
-func (e *StorageEngine) close(releasePools bool) error {
+func (e *StorageEngine) closeAllShards(ctx context.Context) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
- if releasePools {
- for _, p := range e.shardPools {
- p.Release()
- }
- }
-
for id, sh := range e.shards {
- if err := sh.Close(); err != nil {
- e.log.Debug(logs.EngineCouldNotCloseShard,
+ if err := sh.Close(ctx); err != nil {
+ e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
zap.String("id", id),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
}
@@ -182,90 +172,29 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error {
e.blockExec.mtx.RLock()
defer e.blockExec.mtx.RUnlock()
- if e.blockExec.err != nil {
- return e.blockExec.err
+ if e.blockExec.closed {
+ return errClosed
}
return op()
}
-// sets the flag of blocking execution of all data operations according to err:
-// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method
-// (if err == errClosed => additionally releases pools and does not allow to resume executions).
-// - otherwise, resumes execution. If exec was blocked, calls open method.
-//
-// Can be called concurrently with exec. In this case it waits for all executions to complete.
-func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error {
+func (e *StorageEngine) closeEngine(ctx context.Context) error {
e.blockExec.mtx.Lock()
defer e.blockExec.mtx.Unlock()
- prevErr := e.blockExec.err
-
- wasClosed := errors.Is(prevErr, errClosed)
- if wasClosed {
+ if e.blockExec.closed {
return errClosed
}
- e.blockExec.err = err
-
- if err == nil {
- if prevErr != nil { // block -> ok
- return e.open(ctx)
- }
- } else if prevErr == nil { // ok -> block
- return e.close(errors.Is(err, errClosed))
- }
-
- // otherwise do nothing
-
- return nil
-}
-
-// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err.
-// To resume the execution, use ResumeExecution method.
-//
-// Сan be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources
-// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions
-// to complete). Returns error if any Close has been called before.
-//
-// Must not be called concurrently with either Open or Init.
-//
-// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution
-// for this.
-func (e *StorageEngine) BlockExecution(err error) error {
- return e.setBlockExecErr(context.Background(), err)
-}
-
-// ResumeExecution resumes the execution of any data-related operation.
-// To block the execution, use BlockExecution method.
-//
-// Сan be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources
-// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions
-// to complete). Returns error if any Close has been called before.
-//
-// Must not be called concurrently with either Open or Init.
-func (e *StorageEngine) ResumeExecution() error {
- return e.setBlockExecErr(context.Background(), nil)
+ e.blockExec.closed = true
+ return e.closeAllShards(ctx)
}
type ReConfiguration struct {
- errorsThreshold uint32
- shardPoolSize uint32
-
shards map[string][]shard.Option // meta path -> shard opts
}
-// SetErrorsThreshold sets a size amount of errors after which
-// shard is moved to read-only mode.
-func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
- rCfg.errorsThreshold = errorsThreshold
-}
-
-// SetShardPoolSize sets a size of worker pool for each shard.
-func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
- rCfg.shardPoolSize = shardPoolSize
-}
-
// AddShard adds a shard for the reconfiguration.
// Shard identifier is calculated from paths used in blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
@@ -320,12 +249,12 @@ loop:
e.mtx.RUnlock()
- e.removeShards(shardsToRemove...)
+ e.removeShards(ctx, shardsToRemove...)
for _, p := range shardsToReload {
err := p.sh.Reload(ctx, p.opts...)
if err != nil {
- e.log.Error(logs.EngineCouldNotReloadAShard,
+ e.log.Error(ctx, logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),
zap.Error(err))
}
@@ -334,7 +263,7 @@ loop:
for _, newID := range shardsToAdd {
sh, err := e.createShard(ctx, rcfg.shards[newID])
if err != nil {
- return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
+ return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
}
idStr := sh.ID().String()
@@ -344,17 +273,17 @@ loop:
err = sh.Init(ctx)
}
if err != nil {
- _ = sh.Close()
- return fmt.Errorf("could not init %s shard: %w", idStr, err)
+ _ = sh.Close(ctx)
+ return fmt.Errorf("init %s shard: %w", idStr, err)
}
err = e.addShard(sh)
if err != nil {
- _ = sh.Close()
- return fmt.Errorf("could not add %s shard: %w", idStr, err)
+ _ = sh.Close(ctx)
+ return fmt.Errorf("add %s shard: %w", idStr, err)
}
- e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
+ e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
}
return nil
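
After this refactor the only execution-blocking state is "closed": BlockExecution/ResumeExecution are gone, and execIfNotBlocked tests a boolean instead of a stored error. The resulting invariant, sketched (someOp is hypothetical):

    // Every public data operation is wrapped like this; once Close has run,
    // the wrapper returns errClosed and execution cannot be resumed.
    func (e *StorageEngine) someOp(ctx context.Context) error {
        return e.execIfNotBlocked(func() error {
            return nil // actual shard work elided
        })
    }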
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 2de92ae84..4ff0ed5ec 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -2,7 +2,6 @@ package engine
import (
"context"
- "errors"
"fmt"
"io/fs"
"os"
@@ -12,17 +11,14 @@ import (
"testing"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@@ -163,42 +159,6 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
require.Equal(t, 1, shardCount)
}
-func TestExecBlocks(t *testing.T) {
- e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
-
- // put some object
- obj := testutil.GenerateObjectWithCID(cidtest.ID())
-
- addr := object.AddressOf(obj)
-
- require.NoError(t, Put(context.Background(), e, obj))
-
- // block executions
- errBlock := errors.New("block exec err")
-
- require.NoError(t, e.BlockExecution(errBlock))
-
- // try to exec some op
- _, err := Head(context.Background(), e, addr)
- require.ErrorIs(t, err, errBlock)
-
- // resume executions
- require.NoError(t, e.ResumeExecution())
-
- _, err = Head(context.Background(), e, addr) // can be any data-related op
- require.NoError(t, err)
-
- // close
- require.NoError(t, e.Close(context.Background()))
-
- // try exec after close
- _, err = Head(context.Background(), e, addr)
- require.Error(t, err)
-
- // try to resume
- require.Error(t, e.ResumeExecution())
-}
-
func TestPersistentShardID(t *testing.T) {
dir := t.TempDir()
@@ -245,7 +205,6 @@ func TestReload(t *testing.T) {
// no new paths => no new shards
require.Equal(t, shardNum, len(e.shards))
- require.Equal(t, shardNum, len(e.shardPools))
newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum))
@@ -257,7 +216,6 @@ func TestReload(t *testing.T) {
require.NoError(t, e.Reload(context.Background(), rcfg))
require.Equal(t, shardNum+1, len(e.shards))
- require.Equal(t, shardNum+1, len(e.shardPools))
require.NoError(t, e.Close(context.Background()))
})
@@ -277,7 +235,6 @@ func TestReload(t *testing.T) {
// removed one
require.Equal(t, shardNum-1, len(e.shards))
- require.Equal(t, shardNum-1, len(e.shardPools))
require.NoError(t, e.Close(context.Background()))
})
@@ -302,7 +259,8 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
meta.WithEpochState(epochState{}),
),
}
- })
+ }).
+ prepare(t)
e, ids := te.engine, te.shardIDs
for _, id := range ids {
@@ -310,10 +268,6 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
}
require.Equal(t, num, len(e.shards))
- require.Equal(t, num, len(e.shardPools))
-
- require.NoError(t, e.Open(context.Background()))
- require.NoError(t, e.Init(context.Background()))
return e, currShards
}
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 318f938fb..223cdbc48 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -24,9 +23,6 @@ type DeletePrm struct {
forceRemoval bool
}
-// DeleteRes groups the resulting values of Delete operation.
-type DeleteRes struct{}
-
// WithAddress is a Delete option to set the addresses of the objects to delete.
//
// Option is required.
@@ -51,27 +47,21 @@ func (p *DeletePrm) WithForceRemoval() {
// NOTE: Marks any object to be deleted (despite any prohibitions
// on operations with that object) if WithForceRemoval option has
// been provided.
-func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
+func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
attribute.Bool("force_removal", prm.forceRemoval),
))
defer span.End()
+ defer elapsed("Delete", e.metrics.AddMethodDuration)()
- err = e.execIfNotBlocked(func() error {
- res, err = e.delete(ctx, prm)
- return err
+ return e.execIfNotBlocked(func() error {
+ return e.delete(ctx, prm)
})
-
- return
}
-func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
- if e.metrics != nil {
- defer elapsed("Delete", e.metrics.AddMethodDuration)()
- }
-
+func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
var locked struct {
is bool
}
@@ -81,7 +71,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e
// Removal of a big object is done in multiple stages:
// 1. Remove the parent object. If it is locked or already removed, return immediately.
// 2. Otherwise, search for all objects with a particular SplitID and delete them too.
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
+ if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
var existsPrm shard.ExistsPrm
existsPrm.Address = prm.addr
@@ -100,7 +90,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e
return false
} else {
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
}
return false
}
@@ -116,7 +106,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e
_, err = sh.Inhume(ctx, shPrm)
if err != nil {
- e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
var target *apistatus.ObjectLocked
locked.is = errors.As(err, &target)
@@ -126,39 +116,40 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e
// If a parent object is removed we should set GC mark on each shard.
return splitInfo == nil
- })
+ }); err != nil {
+ return err
+ }
if locked.is {
- return DeleteRes{}, new(apistatus.ObjectLocked)
+ return new(apistatus.ObjectLocked)
}
if splitInfo != nil {
- e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
+ return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
}
- return DeleteRes{}, nil
+ return nil
}
-func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) {
+func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error {
var fs objectSDK.SearchFilters
fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
var selectPrm shard.SelectPrm
selectPrm.SetFilters(fs)
- selectPrm.SetContainerID(addr.Container())
+ selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID
var inhumePrm shard.InhumePrm
if force {
inhumePrm.ForceRemoval()
}
- e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
+ return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(ctx, selectPrm)
if err != nil {
- e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
+ e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return false
}
@@ -167,10 +158,9 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
- e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
+ e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
- zap.String("err", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
continue
}
}
@@ -191,16 +181,15 @@ func (e *StorageEngine) deleteChunks(
var objID oid.ID
err := objID.ReadFromV2(chunk.ID)
if err != nil {
- e.reportShardError(sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
}
addr.SetObject(objID)
inhumePrm.MarkAsGarbage(addr)
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
- e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
+ e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
- zap.String("err", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
continue
}
}
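
Callers now check a plain error instead of discarding an empty DeleteRes; a usage sketch that mirrors the test updates below:

    var prm engine.DeletePrm
    prm.WithAddress(addr)
    prm.WithForceRemoval() // optional: delete even despite prohibitions on the object
    if err := e.Delete(ctx, prm); err != nil {
        return err
    }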
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 4a6758012..a56598c09 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -49,18 +48,13 @@ func TestDeleteBigObject(t *testing.T) {
link.SetSplitID(splitID)
link.SetChildren(childIDs...)
- s1 := testNewShard(t)
- s2 := testNewShard(t)
- s3 := testNewShard(t)
-
- e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
- e.log = test.NewLogger(t)
- defer e.Close(context.Background())
+ e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i], false))
}
- require.NoError(t, Put(context.Background(), e, link))
+ require.NoError(t, Put(context.Background(), e, link, false))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
@@ -76,8 +70,7 @@ func TestDeleteBigObject(t *testing.T) {
deletePrm.WithForceRemoval()
deletePrm.WithAddress(addrParent)
- _, err := e.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
+ require.NoError(t, e.Delete(context.Background(), deletePrm))
checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
@@ -119,16 +112,18 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
link.SetSplitID(splitID)
link.SetChildren(childIDs...)
- s1 := testNewShard(t, shard.WithDisabledGC())
+ te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+ return []shard.Option{shard.WithDisabledGC()}
+ }).prepare(t)
+ e := te.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
- e := testNewEngine(t).setInitializedShards(t, s1).engine
- e.log = test.NewLogger(t)
- defer e.Close(context.Background())
+ s1 := te.shards[0]
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i], false))
}
- require.NoError(t, Put(context.Background(), e, link))
+ require.NoError(t, Put(context.Background(), e, link, false))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
@@ -145,8 +140,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
deletePrm.WithForceRemoval()
deletePrm.WithAddress(addrParent)
- _, err := e.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
+ require.NoError(t, e.Delete(context.Background(), deletePrm))
checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
@@ -157,7 +151,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
// delete physical
var delPrm shard.DeletePrm
delPrm.SetAddresses(addrParent)
- _, err = s1.Delete(context.Background(), delPrm)
+ _, err := s1.Delete(context.Background(), delPrm)
require.NoError(t, err)
delPrm.SetAddresses(addrLink)
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 5e883a641..376d545d3 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -12,8 +12,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
@@ -28,16 +28,13 @@ type StorageEngine struct {
shards map[string]hashedShard
- shardPools map[string]util.WorkerPool
-
closeCh chan struct{}
setModeCh chan setModeRequest
wg sync.WaitGroup
blockExec struct {
- mtx sync.RWMutex
-
- err error
+ mtx sync.RWMutex
+ closed bool
}
evacuateLimiter *evacuationLimiter
}
@@ -55,7 +52,7 @@ type setModeRequest struct {
// setModeLoop listens setModeCh to perform degraded mode transition of a single shard.
// Instead of creating a worker per single shard we use a single goroutine.
-func (e *StorageEngine) setModeLoop() {
+func (e *StorageEngine) setModeLoop(ctx context.Context) {
defer e.wg.Done()
var (
@@ -75,7 +72,7 @@ func (e *StorageEngine) setModeLoop() {
if !ok {
inProgress[sid] = struct{}{}
go func() {
- e.moveToDegraded(r.sh, r.errorCount, r.isMeta)
+ e.moveToDegraded(ctx, r.sh, r.errorCount, r.isMeta)
mtx.Lock()
delete(inProgress, sid)
@@ -87,7 +84,7 @@ func (e *StorageEngine) setModeLoop() {
}
}
-func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta bool) {
+func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, errCount uint32, isMeta bool) {
sid := sh.ID()
log := e.log.With(
zap.Stringer("shard_id", sid),
@@ -97,28 +94,26 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta
defer e.mtx.RUnlock()
if isMeta {
- err := sh.SetMode(mode.DegradedReadOnly)
+ err := sh.SetMode(ctx, mode.DegradedReadOnly)
if err == nil {
- log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
+ log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
return
}
- log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
+ log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
zap.Error(err))
}
- err := sh.SetMode(mode.ReadOnly)
+ err := sh.SetMode(ctx, mode.ReadOnly)
if err != nil {
- log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
+ log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
return
}
- log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
+ log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
}
-// reportShardErrorBackground increases shard error counter and logs an error.
-// It is intended to be used from background workers and
-// doesn't change shard mode because of possible deadlocks.
-func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) {
+// reportShardErrorByID increases the shard error counter and logs an error.
+func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) {
e.mtx.RLock()
sh, ok := e.shards[id]
e.mtx.RUnlock()
@@ -127,50 +122,33 @@ func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err er
return
}
- if isLogical(err) {
- e.log.Warn(msg,
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()))
- return
- }
-
- errCount := sh.errorCount.Add(1)
- sh.Shard.IncErrorCounter()
- e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err)
+ e.reportShardError(ctx, sh, msg, err)
}
// reportShardError checks that the amount of errors doesn't exceed the configured threshold.
// If it does, shard is set to read-only mode.
func (e *StorageEngine) reportShardError(
+ ctx context.Context,
sh hashedShard,
msg string,
err error,
fields ...zap.Field,
) {
if isLogical(err) {
- e.log.Warn(msg,
+ e.log.Warn(ctx, msg,
zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()))
+ zap.Error(err))
return
}
errCount := sh.errorCount.Add(1)
- sh.Shard.IncErrorCounter()
- e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err, fields...)
-}
+ e.metrics.IncErrorCounter(sh.ID().String())
-func (e *StorageEngine) reportShardErrorWithFlags(
- sh *shard.Shard,
- errCount uint32,
- msg string,
- err error,
- fields ...zap.Field,
-) {
sid := sh.ID()
- e.log.Warn(msg, append([]zap.Field{
+ e.log.Warn(ctx, msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
- zap.String("error", err.Error()),
+ zap.Error(err),
}, fields...)...)
if e.errorsThreshold == 0 || errCount < e.errorsThreshold {
@@ -179,7 +157,7 @@ func (e *StorageEngine) reportShardErrorWithFlags(
req := setModeRequest{
errorCount: errCount,
- sh: sh,
+ sh: sh.Shard,
isMeta: errors.As(err, new(metaerr.Error)),
}
@@ -188,14 +166,17 @@ func (e *StorageEngine) reportShardErrorWithFlags(
default:
// For background workers we can have a lot of such errors,
// thus logging is done with DEBUG level.
- e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
+ e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
zap.Stringer("shard_id", sid),
zap.Uint32("error_count", errCount))
}
}
func isLogical(err error) bool {
- return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
+ return errors.As(err, &logicerr.Logical{}) ||
+ errors.Is(err, context.Canceled) ||
+ errors.Is(err, context.DeadlineExceeded) ||
+ errors.As(err, new(*apistatus.ResourceExhausted))
}
// Option represents StorageEngine's constructor option.
@@ -208,8 +189,6 @@ type cfg struct {
metrics MetricRegister
- shardPoolSize uint32
-
lowMem bool
containerSource atomic.Pointer[containerSource]
@@ -217,8 +196,8 @@ type cfg struct {
func defaultCfg() *cfg {
res := &cfg{
- log: &logger.Logger{Logger: zap.L()},
- shardPoolSize: 20,
+ log: logger.NewLoggerWrapper(zap.L()),
+ metrics: noopMetrics{},
}
res.containerSource.Store(&containerSource{})
return res
@@ -232,13 +211,18 @@ func New(opts ...Option) *StorageEngine {
opts[i](c)
}
+ evLimMtx := &sync.RWMutex{}
+ evLimCond := sync.NewCond(evLimMtx)
+
return &StorageEngine{
- cfg: c,
- shards: make(map[string]hashedShard),
- shardPools: make(map[string]util.WorkerPool),
- closeCh: make(chan struct{}),
- setModeCh: make(chan setModeRequest),
- evacuateLimiter: &evacuationLimiter{},
+ cfg: c,
+ shards: make(map[string]hashedShard),
+ closeCh: make(chan struct{}),
+ setModeCh: make(chan setModeRequest),
+ evacuateLimiter: &evacuationLimiter{
+ guard: evLimMtx,
+ statusCond: evLimCond,
+ },
}
}
@@ -255,13 +239,6 @@ func WithMetrics(v MetricRegister) Option {
}
}
-// WithShardPoolSize returns option to specify size of worker pool for each shard.
-func WithShardPoolSize(sz uint32) Option {
- return func(c *cfg) {
- c.shardPoolSize = sz
- }
-}
-
// WithErrorThreshold returns an option to specify size amount of errors after which
// shard is moved to read-only mode.
func WithErrorThreshold(sz uint32) Option {
@@ -297,7 +274,7 @@ func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (
return true, nil
}
- wasRemoved, err := container.WasRemoved(s.cs, id)
+ wasRemoved, err := container.WasRemoved(ctx, s.cs, id)
if err != nil {
return false, err
}
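
With per-shard worker pools removed, engine construction drops WithShardPoolSize. A hedged construction sketch (log and metricsRegister are caller-supplied values; WithMetrics is optional now that noopMetrics is the default):

    e := engine.New(
        engine.WithLogger(log),
        engine.WithErrorThreshold(32),
        engine.WithMetrics(metricsRegister),
    )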
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 525e17f34..fc6d9ee9c 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -2,141 +2,111 @@ package engine
import (
"context"
+ "fmt"
"path/filepath"
- "sync/atomic"
+ "runtime/debug"
+ "strings"
+ "sync"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "git.frostfs.info/TrueCloudLab/hrw"
- "github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
)
-type epochState struct{}
+type epochState struct {
+ currEpoch uint64
+}
func (s epochState) CurrentEpoch() uint64 {
- return 0
-}
-
-func BenchmarkExists(b *testing.B) {
- b.Run("2 shards", func(b *testing.B) {
- benchmarkExists(b, 2)
- })
- b.Run("4 shards", func(b *testing.B) {
- benchmarkExists(b, 4)
- })
- b.Run("8 shards", func(b *testing.B) {
- benchmarkExists(b, 8)
- })
-}
-
-func benchmarkExists(b *testing.B, shardNum int) {
- shards := make([]*shard.Shard, shardNum)
- for i := range shardNum {
- shards[i] = testNewShard(b)
- }
-
- e := testNewEngine(b).setInitializedShards(b, shards...).engine
- defer func() { require.NoError(b, e.Close(context.Background())) }()
-
- addr := oidtest.Address()
- for range 100 {
- obj := testutil.GenerateObjectWithCID(cidtest.ID())
- err := Put(context.Background(), e, obj)
- if err != nil {
- b.Fatal(err)
- }
- }
-
- b.ReportAllocs()
- b.ResetTimer()
- for range b.N {
- var shPrm shard.ExistsPrm
- shPrm.Address = addr
- shPrm.ParentAddress = oid.Address{}
- ok, _, err := e.exists(context.Background(), shPrm)
- if err != nil || ok {
- b.Fatalf("%t %v", ok, err)
- }
- }
+ return s.currEpoch
}
type testEngineWrapper struct {
engine *StorageEngine
+ shards []*shard.Shard
shardIDs []*shard.ID
}
func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
- engine := New(WithLogger(test.NewLogger(t)))
- for _, opt := range opts {
- opt(engine.cfg)
- }
- return &testEngineWrapper{
- engine: engine,
- }
-}
-
-func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
- for _, s := range shards {
- pool, err := ants.NewPool(10, ants.WithNonblocking(true))
- require.NoError(t, err)
-
- te.engine.shards[s.ID().String()] = hashedShard{
- shardWrapper: shardWrapper{
- errorCount: new(atomic.Uint32),
- Shard: s,
- },
- hash: hrw.StringHash(s.ID().String()),
- }
- te.engine.shardPools[s.ID().String()] = pool
- te.shardIDs = append(te.shardIDs, s.ID())
- }
- return te
+ opts = append(testGetDefaultEngineOptions(t), opts...)
+ return &testEngineWrapper{engine: New(opts...)}
}
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
- shards := make([]*shard.Shard, 0, num)
-
- for range num {
- shards = append(shards, testNewShard(t))
- }
-
- return te.setInitializedShards(t, shards...)
+ return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
+ return testGetDefaultShardOptions(t)
+ })
}
-func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+func (te *testEngineWrapper) setShardsNumOpts(
+ t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+ te.shards = make([]*shard.Shard, num)
+ te.shardIDs = make([]*shard.ID, num)
for i := range num {
- opts := shardOpts(i)
- id, err := te.engine.AddShard(context.Background(), opts...)
+ shard, err := te.engine.createShard(context.Background(), shardOpts(i))
require.NoError(t, err)
- te.shardIDs = append(te.shardIDs, id)
+ require.NoError(t, te.engine.addShard(shard))
+ te.shards[i] = shard
+ te.shardIDs[i] = shard.ID()
}
+ require.Len(t, te.engine.shards, num)
return te
}
-func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
- for i := range num {
- defaultOpts := testDefaultShardOptions(t)
- opts := append(defaultOpts, shardOpts(i)...)
- id, err := te.engine.AddShard(context.Background(), opts...)
- require.NoError(t, err)
- te.shardIDs = append(te.shardIDs, id)
- }
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(
+ t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+ return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
+ return append(testGetDefaultShardOptions(t), shardOpts(id)...)
+ })
+}
+
+// prepare calls Open and Init on the created engine.
+func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
+ require.NoError(t, te.engine.Open(context.Background()))
+ require.NoError(t, te.engine.Init(context.Background()))
return te
}
+func testGetDefaultEngineOptions(t testing.TB) []Option {
+ return []Option{
+ WithLogger(test.NewLogger(t)),
+ }
+}
+
+func testGetDefaultShardOptions(t testing.TB) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t)),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages(
+ newStorages(t, t.TempDir(), 1<<20)),
+ blobstor.WithLogger(test.NewLogger(t)),
+ ),
+ shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
+ shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...),
+ shard.WithLimiter(&testQoSLimiter{t: t}),
+ }
+}
+
+func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option {
+ return []meta.Option{
+ meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+ meta.WithPermissions(0o700),
+ meta.WithEpochState(epochState{}),
+ meta.WithLogger(test.NewLogger(t)),
+ }
+}
+
func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
return []blobstor.SubStorage{
{
@@ -146,7 +116,8 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1),
blobovniczatree.WithPermissions(0o700),
- blobovniczatree.WithLogger(test.NewLogger(t))),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < smallSize
},
@@ -187,33 +158,77 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
}, smallFileStorage, largeFileStorage
}
-func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard {
- sid, err := generateShardID()
- require.NoError(t, err)
+var _ qos.Limiter = (*testQoSLimiter)(nil)
- shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...)
- s := shard.New(append(shardOpts, opts...)...)
-
- require.NoError(t, s.Open(context.Background()))
- require.NoError(t, s.Init(context.Background()))
-
- return s
+type testQoSLimiter struct {
+ t testing.TB
+ guard sync.Mutex
+ id int64
+ readStacks map[int64][]byte
+ writeStacks map[int64][]byte
}
-func testDefaultShardOptions(t testing.TB) []shard.Option {
- return []shard.Option{
- shard.WithLogger(test.NewLogger(t)),
- shard.WithBlobStorOptions(
- blobstor.WithStorages(
- newStorages(t, t.TempDir(), 1<<20)),
- blobstor.WithLogger(test.NewLogger(t)),
- ),
- shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
- meta.WithPermissions(0o700),
- meta.WithEpochState(epochState{}),
- meta.WithLogger(test.NewLogger(t)),
- ),
+func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
+
+func (t *testQoSLimiter) Close() {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ var sb strings.Builder
+ var seqN int
+ for _, stack := range t.readStacks {
+ seqN++
+ sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
}
+ for _, stack := range t.writeStacks {
+ seqN++
+ sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
+ }
+ require.True(t.t, seqN == 0, sb.String())
}
+
+func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ stack := debug.Stack()
+
+ t.id++
+ id := t.id
+
+ if t.readStacks == nil {
+ t.readStacks = make(map[int64][]byte)
+ }
+ t.readStacks[id] = stack
+
+ return func() {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ delete(t.readStacks, id)
+ }, nil
+}
+
+func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ stack := debug.Stack()
+
+ t.id++
+ id := t.id
+
+ if t.writeStacks == nil {
+ t.writeStacks = make(map[int64][]byte)
+ }
+ t.writeStacks[id] = stack
+
+ return func() {
+ t.quard.Lock()
+ defer t.quard.Unlock()
+
+ delete(t.writeStacks, id)
+ }, nil
+}
+
+func (t *testQoSLimiter) SetParentID(string) {}
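
The limiter above records a goroutine stack for every acquired slot and fails the test from Close if any ReleaseFunc was never invoked. The calling pattern it enforces, sketched:

    release, err := limiter.ReadRequest(ctx)
    if err != nil {
        return err
    }
    defer release() // a missing release leaves its stack in readStacks and trips the assertion in Close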
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index 535435ceb..57029dd5f 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -46,7 +46,6 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
var testShards [2]*testShard
te := testNewEngine(t,
- WithShardPoolSize(1),
WithErrorThreshold(errThreshold),
).
setShardsNumOpts(t, 2, func(id int) []shard.Option {
@@ -67,10 +66,8 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
pilorama.WithPerm(0o700)),
}
- })
+ }).prepare(t)
e := te.engine
- require.NoError(t, e.Open(context.Background()))
- require.NoError(t, e.Init(context.Background()))
for i, id := range te.shardIDs {
testShards[i].id = id
@@ -151,17 +148,17 @@ func TestErrorReporting(t *testing.T) {
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- for i := uint32(0); i < 2; i++ {
+ for i := range uint32(2) {
_, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, false))
+ require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, false))
checkShardState(t, te.ng, te.shards[0].id, errThreshold+1, mode.ReadWrite)
- require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true))
+ require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, true))
checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
require.NoError(t, te.ng.Close(context.Background()))
})
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 7bef6edfb..c08dfbf03 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -4,19 +4,20 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"strings"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -24,6 +25,16 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ // containerWorkerCountDefault is the default number of
+ // concurrent container evacuation workers.
+ containerWorkerCountDefault = 10
+ // objectWorkerCountDefault is the default number of
+ // concurrent object evacuation workers.
+ objectWorkerCountDefault = 10
)
var (
@@ -44,9 +55,6 @@ func (s EvacuateScope) String() string {
var sb strings.Builder
first := true
if s&EvacuateScopeObjects == EvacuateScopeObjects {
- if !first {
- sb.WriteString(";")
- }
sb.WriteString("objects")
first = false
}
@@ -77,8 +85,11 @@ type EvacuateShardPrm struct {
ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error)
TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error)
IgnoreErrors bool
- Async bool
Scope EvacuateScope
+ RepOneOnly bool
+
+ ContainerWorkerCount uint32
+ ObjectWorkerCount uint32
}
// EvacuateShardRes represents result of the EvacuateShard operation.
@@ -189,21 +200,14 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
return res
}
-const defaultEvacuateBatchSize = 100
-
-type pooledShard struct {
- hashedShard
- pool util.WorkerPool
-}
-
var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
// Evacuate moves data from one shard to the others.
// The shard being moved must be in read-only mode.
-func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) {
+func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return ctx.Err()
default:
}
@@ -215,7 +219,6 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate",
trace.WithAttributes(
attribute.StringSlice("shardIDs", shardIDs),
- attribute.Bool("async", prm.Async),
attribute.Bool("ignoreErrors", prm.IgnoreErrors),
attribute.Stringer("scope", prm.Scope),
))
@@ -223,7 +226,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
shards, err := e.getActualShards(shardIDs, prm)
if err != nil {
- return nil, err
+ return err
}
shardsToEvacuate := make(map[string]*shard.Shard)
@@ -236,40 +239,36 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
}
res := NewEvacuateShardRes()
- ctx = ctxOrBackground(ctx, prm.Async)
- eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
+ ctx = context.WithoutCancel(ctx)
+ eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
if err != nil {
- return nil, err
+ return err
}
+ var mtx sync.RWMutex
+ copyShards := func() []hashedShard {
+ mtx.RLock()
+ defer mtx.RUnlock()
+ t := slices.Clone(shards)
+ return t
+ }
eg.Go(func() error {
- return e.evacuateShards(egCtx, shardIDs, prm, res, shards, shardsToEvacuate)
+ return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate)
})
- if prm.Async {
- return nil, nil
- }
-
- return res, eg.Wait()
-}
-
-func ctxOrBackground(ctx context.Context, background bool) context.Context {
- if background {
- return context.Background()
- }
- return ctx
+ return nil
}
func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
) error {
var err error
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
trace.WithAttributes(
attribute.StringSlice("shardIDs", shardIDs),
- attribute.Bool("async", prm.Async),
attribute.Bool("ignoreErrors", prm.IgnoreErrors),
attribute.Stringer("scope", prm.Scope),
+ attribute.Bool("repOneOnly", prm.RepOneOnly),
))
defer func() {
@@ -277,25 +276,51 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
e.evacuateLimiter.Complete(err)
}()
- e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
+ e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.Stringer("scope", prm.Scope))
err = e.getTotals(ctx, prm, shardsToEvacuate, res)
if err != nil {
- e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
+ zap.Stringer("scope", prm.Scope))
return err
}
- for _, shardID := range shardIDs {
- if err = e.evacuateShard(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
- e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
- return err
+ ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm)
+ continueLoop := true
+ for i := 0; continueLoop && i < len(shardIDs); i++ {
+ select {
+ case <-ctx.Done():
+ continueLoop = false
+ default:
+ egShard.Go(func() error {
+ err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
}
}
+ err = egShard.Wait()
+ if err != nil {
+ err = fmt.Errorf("shard error: %w", err)
+ }
+ errContainer := egContainer.Wait()
+ errObject := egObject.Wait()
+ if errContainer != nil {
+ err = errors.Join(err, fmt.Errorf("container error: %w", errContainer))
+ }
+ if errObject != nil {
+ err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
+ }
+ if err != nil {
+ e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.Stringer("scope", prm.Scope))
+ return err
+ }
- e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
+ e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation,
zap.Strings("shard_ids", shardIDs),
evacuationOperationLogField,
zap.Uint64("total_objects", res.ObjectsTotal()),
@@ -309,6 +334,27 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
return nil
}
+func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) (
+ context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group,
+) {
+ operationCtx, cancel := context.WithCancelCause(ctx)
+ egObject, _ := errgroup.WithContext(operationCtx)
+ objectWorkerCount := prm.ObjectWorkerCount
+ if objectWorkerCount == 0 {
+ objectWorkerCount = objectWorkerCountDefault
+ }
+ egObject.SetLimit(int(objectWorkerCount))
+ egContainer, _ := errgroup.WithContext(operationCtx)
+ containerWorkerCount := prm.ContainerWorkerCount
+ if containerWorkerCount == 0 {
+ containerWorkerCount = containerWorkerCountDefault
+ }
+ egContainer.SetLimit(int(containerWorkerCount))
+ egShard, _ := errgroup.WithContext(operationCtx)
+
+ return operationCtx, cancel, egShard, egContainer, egObject
+}
+
func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals")
defer span.End()
@@ -335,8 +381,9 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha
return nil
}
-func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
trace.WithAttributes(
@@ -345,11 +392,10 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
defer span.End()
if prm.Scope.WithObjects() {
- if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
+ if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil {
return err
}
}
-
if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() {
if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
return err
@@ -359,44 +405,84 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
return nil
}
-func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
- var listPrm shard.ListWithCursorPrm
- listPrm.WithCount(defaultEvacuateBatchSize)
-
sh := shardsToEvacuate[shardID]
- sh.SetEvacuationInProgress(true)
-
- var c *meta.Cursor
- for {
- listPrm.WithCursor(c)
-
- // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
- // because ListWithCursor works only with the metabase.
- listRes, err := sh.ListWithCursor(ctx, listPrm)
- if err != nil {
- if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
- break
+ var cntPrm shard.IterateOverContainersPrm
+ cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egContainer.Go(func() error {
+ var skip bool
+ c, err := e.containerSource.Load().cs.Get(ctx, cnt)
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ skip = true
+ } else {
+ return err
+ }
+ }
+ if !skip && prm.RepOneOnly {
+ skip = e.isNotRepOne(c)
+ }
+ if skip {
+ countPrm := shard.CountAliveObjectsInContainerPrm{
+ ObjectType: objType,
+ ContainerID: cnt,
+ }
+ count, err := sh.CountAliveObjectsInContainer(ctx, countPrm)
+ if err != nil {
+ return err
+ }
+ res.objSkipped.Add(count)
+ return nil
+ }
+ var objPrm shard.IterateOverObjectsInContainerPrm
+ objPrm.ObjectType = objType
+ objPrm.ContainerID = cnt
+ objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egObject.Go(func() error {
+ err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
+ return nil
+ }
+ err = sh.IterateOverObjectsInContainer(ctx, objPrm)
+ if err != nil {
+ cancel(err)
}
- e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
- }
-
- if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, shardsToEvacuate); err != nil {
- return err
- }
-
- c = listRes.Cursor()
+ })
+ return nil
}
- return nil
+
+ sh.SetEvacuationInProgress(true)
+ err := sh.IterateOverContainers(ctx, cntPrm)
+ if err != nil {
+ cancel(err)
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField)
+ }
+ return err
}
func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
) error {
sh := shardsToEvacuate[shardID]
+ shards := getShards()
var listPrm pilorama.TreeListTreesPrm
first := true
@@ -423,7 +509,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string,
}
func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID,
- prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees",
trace.WithAttributes(
@@ -443,39 +529,39 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
return err
}
if success {
- e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedLocal,
+ e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID),
- evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ evacuationOperationLogField)
res.trEvacuated.Add(1)
continue
}
moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm)
if err != nil {
- e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return err
}
if moved {
- e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedRemote,
+ e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID),
zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK),
- evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ evacuationOperationLogField)
res.trEvacuated.Add(1)
} else if prm.IgnoreErrors {
res.trFailed.Add(1)
- e.log.Warn(logs.EngineShardsEvacuationFailedToMoveTree,
+ e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
} else {
- e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID)
}
}
@@ -484,14 +570,14 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
if prm.TreeHandler == nil {
- return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
+ return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
}
return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)
}
func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID,
- prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
) (bool, string, error) {
target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate)
if err != nil {
@@ -561,15 +647,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar
// findShardToEvacuateTree returns first shard according HRW or first shard with tree exists.
func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
-) (pooledShard, bool, error) {
+ shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+) (hashedShard, bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
- var result pooledShard
+ var result hashedShard
var found bool
for _, target := range shards {
select {
case <-ctx.Done():
- return pooledShard{}, false, ctx.Err()
+ return hashedShard{}, false, ctx.Err()
default:
}
@@ -597,7 +683,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora
return result, found, nil
}
-func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) {
+func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) {
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -627,84 +713,85 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
// We must have all shards, to have correct information about their
// indexes in a sorted slice and set appropriate marks in the metabase.
// Evacuated shard is skipped during put.
- shards := make([]pooledShard, 0, len(e.shards))
+ shards := make([]hashedShard, 0, len(e.shards))
for id := range e.shards {
- shards = append(shards, pooledShard{
- hashedShard: hashedShard(e.shards[id]),
- pool: e.shardPools[id],
- })
+ shards = append(shards, e.shards[id])
}
return shards, nil
}
-func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
+ getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects",
- trace.WithAttributes(
- attribute.Int("objects_count", len(toEvacuate)),
- ))
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
defer span.End()
- for i := range toEvacuate {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- addr := toEvacuate[i].Address
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
- var getPrm shard.GetPrm
- getPrm.SetAddress(addr)
- getPrm.SkipEvacCheck(true)
+ shards := getShards()
+ addr := objInfo.Address
- getRes, err := sh.Get(ctx, getPrm)
- if err != nil {
- if prm.IgnoreErrors {
- res.objFailed.Add(1)
- continue
- }
- e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- return err
- }
+ var getPrm shard.GetPrm
+ getPrm.SetAddress(addr)
+ getPrm.SkipEvacCheck(true)
- evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, shardsToEvacuate, res)
- if err != nil {
- return err
- }
-
- if evacuatedLocal {
- continue
- }
-
- if prm.ObjectsHandler == nil {
- // Do not check ignoreErrors flag here because
- // ignoring errors on put make this command kinda useless.
- return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i])
- }
-
- moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
- if err != nil {
- e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- return err
- }
- if moved {
- res.objEvacuated.Add(1)
- } else if prm.IgnoreErrors {
+ getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm)
+ if err != nil {
+ if prm.IgnoreErrors {
res.objFailed.Add(1)
- e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- } else {
- return fmt.Errorf("object %s was not replicated", addr)
+ return nil
}
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
+ return err
+ }
+
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr)
+ if err != nil {
+ return err
+ }
+
+ if evacuatedLocal {
+ return nil
+ }
+
+ if prm.ObjectsHandler == nil {
+ // Do not check the ignoreErrors flag here because
+ // ignoring errors on put makes this command almost useless.
+ return fmt.Errorf("%w: %s", errPutShard, objInfo)
+ }
+
+ moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
+ if err != nil {
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
+ return err
+ }
+ if moved {
+ res.objEvacuated.Add(1)
+ } else if prm.IgnoreErrors {
+ res.objFailed.Add(1)
+ e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
+ } else {
+ return fmt.Errorf("object %s was not replicated", addr)
}
return nil
}
+func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
+ p := c.Value.PlacementPolicy()
+ for i := range p.NumberOfReplicas() {
+ if p.ReplicaDescriptor(i).NumberOfObjects() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
+ shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
) (bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
for j := range shards {
@@ -717,15 +804,14 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
continue
}
- switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object).status {
+ switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status {
case putToShardSuccess:
res.objEvacuated.Add(1)
- e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
+ e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
zap.Stringer("from", sh.ID()),
zap.Stringer("to", shards[j].ID()),
zap.Stringer("addr", addr),
- evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ evacuationOperationLogField)
return true, nil
case putToShardExists, putToShardRemoved:
res.objSkipped.Add(1)
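
Note: Evacuate now always detaches. It registers the task with the evacuation limiter on a cancellation-free context (context.WithoutCancel) and returns immediately, which is why the EvacuateShardRes return value and the Async flag disappear from the API; progress is observed through GetEvacuationState. A hedged sketch of the resulting calling convention — the polling loop and interval are assumptions, while GetEvacuationState, ProcessingStatus, and EvacuateProcessStateCompleted come from this diff and its tests:

// Assumed to live in the engine package; requires "context" and "time" imports.
func evacuateAndWait(ctx context.Context, e *StorageEngine, prm EvacuateShardPrm) (*EvacuationState, error) {
	if err := e.Evacuate(ctx, prm); err != nil { // returns once the task is registered
		return nil, err
	}
	for {
		st, err := e.GetEvacuationState(ctx)
		if err != nil {
			return nil, err
		}
		if st.ProcessingStatus() == EvacuateProcessStateCompleted {
			return st, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(100 * time.Millisecond):
		}
	}
}
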
diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go
index 1e6b9ccb1..b75e8686d 100644
--- a/pkg/local_object_storage/engine/evacuate_limiter.go
+++ b/pkg/local_object_storage/engine/evacuate_limiter.go
@@ -3,6 +3,7 @@ package engine
import (
"context"
"fmt"
+ "slices"
"sync"
"time"
@@ -94,8 +95,7 @@ func (s *EvacuationState) StartedAt() *time.Time {
if s == nil {
return nil
}
- defaultTime := time.Time{}
- if s.startedAt == defaultTime {
+ if s.startedAt.IsZero() {
return nil
}
return &s.startedAt
@@ -105,8 +105,7 @@ func (s *EvacuationState) FinishedAt() *time.Time {
if s == nil {
return nil
}
- defaultTime := time.Time{}
- if s.finishedAt == defaultTime {
+ if s.finishedAt.IsZero() {
return nil
}
return &s.finishedAt
@@ -123,8 +122,7 @@ func (s *EvacuationState) DeepCopy() *EvacuationState {
if s == nil {
return nil
}
- shardIDs := make([]string, len(s.shardIDs))
- copy(shardIDs, s.shardIDs)
+ shardIDs := slices.Clone(s.shardIDs)
return &EvacuationState{
shardIDs: shardIDs,
@@ -141,7 +139,8 @@ type evacuationLimiter struct {
eg *errgroup.Group
cancel context.CancelFunc
- guard sync.RWMutex
+ guard *sync.RWMutex
+ statusCond *sync.Cond // used in unit tests
}
func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) {
@@ -167,6 +166,7 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res
startedAt: time.Now().UTC(),
result: result,
}
+ l.statusCond.Broadcast()
return l.eg, egCtx, nil
}
@@ -182,6 +182,7 @@ func (l *evacuationLimiter) Complete(err error) {
l.state.processState = EvacuateProcessStateCompleted
l.state.errMessage = errMsq
l.state.finishedAt = time.Now().UTC()
+ l.statusCond.Broadcast()
l.eg = nil
}
@@ -216,6 +217,7 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error {
l.state = EvacuationState{}
l.eg = nil
l.cancel = nil
+ l.statusCond.Broadcast()
return nil
}
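
Note: the limiter's guard becomes a pointer and is paired with statusCond, which is broadcast on every state transition (TryStart, Complete, ResetEvacuationStatus); the test helper in the next file waits on it instead of polling with require.Eventually. The constructor that ties the Cond to the same mutex sits outside this hunk and is assumed. The underlying condition-variable pattern in isolation:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	mu := &sync.Mutex{}
	cond := sync.NewCond(mu) // the Cond must share the Locker guarding the state
	done := false

	go func() {
		time.Sleep(10 * time.Millisecond)
		mu.Lock()
		done = true      // state transition...
		cond.Broadcast() // ...always followed by Broadcast, as in the limiter
		mu.Unlock()
	}()

	mu.Lock()
	for !done { // re-check in a loop: Wait can wake before the predicate holds
		cond.Wait()
	}
	mu.Unlock()
	fmt.Println("completed")
}
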
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 28529fab9..f2ba7d994 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -6,9 +6,12 @@ import (
"fmt"
"path/filepath"
"strconv"
+ "sync"
+ "sync/atomic"
"testing"
"time"
+ coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
@@ -18,14 +21,38 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
+type containerStorage struct {
+ cntmap map[cid.ID]*container.Container
+ latency time.Duration
+}
+
+func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) {
+ time.Sleep(cs.latency)
+ v, ok := cs.cntmap[id]
+ if !ok {
+ return nil, new(apistatus.ContainerNotFound)
+ }
+ coreCnt := coreContainer.Container{
+ Value: *v,
+ }
+ return &coreCnt, nil
+}
+
+func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
+ return nil, nil
+}
+
func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
dir := t.TempDir()
@@ -48,10 +75,9 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
pilorama.WithPerm(0o700),
),
}
- })
+ }).
+ prepare(t)
e, ids := te.engine, te.shardIDs
- require.NoError(t, e.Open(context.Background()))
- require.NoError(t, e.Init(context.Background()))
objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
treeID := "version"
@@ -59,10 +85,15 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
{Key: pilorama.AttributeVersion, Value: []byte("XXX")},
{Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
}
-
+ cnrMap := make(map[cid.ID]*container.Container)
for _, sh := range ids {
- for range objPerShard {
+ for i := range objPerShard {
+ // Create dummy container
+ cnr1 := container.Container{}
+ cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i))
contID := cidtest.ID()
+ cnrMap[contID] = &cnr1
+
obj := testutil.GenerateObjectWithCID(contID)
objects = append(objects, obj)
@@ -76,6 +107,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
require.NoError(t, err)
}
}
+ e.SetContainerSource(&containerStorage{cntmap: cnrMap})
return e, ids, objects
}
@@ -108,16 +140,17 @@ func TestEvacuateShardObjects(t *testing.T) {
prm.Scope = EvacuateScopeObjects
t.Run("must be read-only", func(t *testing.T) {
- res, err := e.Evacuate(context.Background(), prm)
+ err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, ErrMustBeReadOnly)
- require.Equal(t, uint64(0), res.ObjectsEvacuated())
})
- require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
- res, err := e.Evacuate(context.Background(), prm)
+ err := e.Evacuate(context.Background(), prm)
require.NoError(t, err)
- require.Equal(t, uint64(objPerShard), res.ObjectsEvacuated())
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, st.ErrorMessage(), "")
+ require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated())
// We check that all objects are available both before and after shard removal.
// First case is a real-world use-case. It ensures that an object can be put in presence
@@ -154,33 +187,46 @@ func TestEvacuateShardObjects(t *testing.T) {
}
// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
- res, err = e.Evacuate(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, uint64(0), res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st = testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, st.ErrorMessage(), "")
+ require.Equal(t, uint64(0), st.ObjectsEvacuated())
checkHasObjects(t)
e.mtx.Lock()
delete(e.shards, evacuateShardID)
- delete(e.shardPools, evacuateShardID)
e.mtx.Unlock()
checkHasObjects(t)
}
+func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState {
+ var st *EvacuationState
+ var err error
+ e.evacuateLimiter.waitForCompleted()
+ st, err = e.GetEvacuationState(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
+ return st
+}
+
func TestEvacuateObjectsNetwork(t *testing.T) {
t.Parallel()
errReplication := errors.New("handler error")
acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
- var n uint64
+ var n uint64
+ var mtx sync.Mutex
return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- if n == max {
+ mtx.Lock()
+ defer mtx.Unlock()
+ if n == max {
return false, errReplication
}
- n++
+ n++
for i := range objects {
if addr == objectCore.AddressOf(objects[i]) {
require.Equal(t, objects[i], obj)
@@ -201,21 +247,21 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
evacuateShardID := ids[0].String()
- require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[0:1]
prm.Scope = EvacuateScopeObjects
- res, err := e.Evacuate(context.Background(), prm)
+ err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errMustHaveTwoShards)
- require.Equal(t, uint64(0), res.ObjectsEvacuated())
prm.ObjectsHandler = acceptOneOf(objects, 2)
- res, err = e.Evacuate(context.Background(), prm)
- require.ErrorIs(t, err, errReplication)
- require.Equal(t, uint64(2), res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), errReplication.Error())
+ require.Equal(t, uint64(2), st.ObjectsEvacuated())
})
t.Run("multiple shards, evacuate one", func(t *testing.T) {
t.Parallel()
@@ -224,24 +270,26 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
prm.ObjectsHandler = acceptOneOf(objects, 2)
prm.Scope = EvacuateScopeObjects
- res, err := e.Evacuate(context.Background(), prm)
- require.ErrorIs(t, err, errReplication)
- require.Equal(t, uint64(2), res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), errReplication.Error())
+ require.Equal(t, uint64(2), st.ObjectsEvacuated())
t.Run("no errors", func(t *testing.T) {
prm.ObjectsHandler = acceptOneOf(objects, 3)
- res, err := e.Evacuate(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, uint64(3), res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, st.ErrorMessage(), "")
+ require.Equal(t, uint64(3), st.ObjectsEvacuated())
})
})
t.Run("multiple shards, evacuate many", func(t *testing.T) {
@@ -262,7 +310,7 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
}
for i := range ids {
- require.NoError(t, e.shards[ids[i].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly))
}
var prm EvacuateShardPrm
@@ -270,16 +318,18 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
prm.ObjectsHandler = acceptOneOf(objects, totalCount-1)
prm.Scope = EvacuateScopeObjects
- res, err := e.Evacuate(context.Background(), prm)
- require.ErrorIs(t, err, errReplication)
- require.Equal(t, totalCount-1, res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), errReplication.Error())
+ require.Equal(t, totalCount-1, st.ObjectsEvacuated())
t.Run("no errors", func(t *testing.T) {
prm.ObjectsHandler = acceptOneOf(objects, totalCount)
- res, err := e.Evacuate(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, totalCount, res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, st.ErrorMessage(), "")
+ require.Equal(t, totalCount, st.ObjectsEvacuated())
})
})
}
@@ -291,8 +341,8 @@ func TestEvacuateCancellation(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
@@ -309,9 +359,39 @@ func TestEvacuateCancellation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
- res, err := e.Evacuate(ctx, prm)
+ err := e.Evacuate(ctx, prm)
require.ErrorContains(t, err, "context canceled")
- require.Equal(t, uint64(0), res.ObjectsEvacuated())
+}
+
+func TestEvacuateCancellationByError(t *testing.T) {
+ t.Parallel()
+ e, ids, _ := newEngineEvacuate(t, 2, 10)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[1:2]
+ var once atomic.Bool
+ prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ var err error
+ flag := true
+ if once.CompareAndSwap(false, true) {
+ err = errors.New("test error")
+ flag = false
+ }
+ return flag, err
+ }
+ prm.Scope = EvacuateScopeObjects
+ prm.ObjectWorkerCount = 2
+ prm.ContainerWorkerCount = 2
+
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), "test error")
}
func TestEvacuateSingleProcess(t *testing.T) {
@@ -320,11 +400,11 @@ func TestEvacuateSingleProcess(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
- blocker := make(chan interface{})
- running := make(chan interface{})
+ blocker := make(chan any)
+ running := make(chan any)
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
@@ -341,20 +421,19 @@ func TestEvacuateSingleProcess(t *testing.T) {
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- res, err := e.Evacuate(egCtx, prm)
- require.NoError(t, err, "first evacuation failed")
- require.Equal(t, uint64(3), res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
return nil
})
eg.Go(func() error {
<-running
- res, err := e.Evacuate(egCtx, prm)
- require.ErrorContains(t, err, "evacuate is already running for shard ids", "second evacuation not failed")
- require.Equal(t, uint64(0), res.ObjectsEvacuated())
+ require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed")
close(blocker)
return nil
})
require.NoError(t, eg.Wait())
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, uint64(3), st.ObjectsEvacuated())
+ require.Equal(t, st.ErrorMessage(), "")
}
func TestEvacuateObjectsAsync(t *testing.T) {
@@ -363,11 +442,11 @@ func TestEvacuateObjectsAsync(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
- blocker := make(chan interface{})
- running := make(chan interface{})
+ blocker := make(chan any)
+ running := make(chan any)
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
@@ -393,9 +472,9 @@ func TestEvacuateObjectsAsync(t *testing.T) {
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- res, err := e.Evacuate(egCtx, prm)
- require.NoError(t, err, "first evacuation failed")
- require.Equal(t, uint64(3), res.ObjectsEvacuated())
+ require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
return nil
})
@@ -418,12 +497,7 @@ func TestEvacuateObjectsAsync(t *testing.T) {
close(blocker)
- require.Eventually(t, func() bool {
- st, err = e.GetEvacuationState(context.Background())
- return st.ProcessingStatus() == EvacuateProcessStateCompleted
- }, 3*time.Second, 10*time.Millisecond, "invalid final state")
-
- require.NoError(t, err, "get final state failed")
+ st = testWaitForEvacuationCompleted(t, e)
require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
require.NotNil(t, st.StartedAt(), "invalid final started at")
require.NotNil(t, st.FinishedAt(), "invalid final finished at")
@@ -449,7 +523,7 @@ func TestEvacuateTreesLocal(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[0:1]
@@ -469,14 +543,9 @@ func TestEvacuateTreesLocal(t *testing.T) {
require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
- res, err := e.Evacuate(context.Background(), prm)
- require.NotNil(t, res, "sync evacuation result must be not nil")
- require.NoError(t, err, "evacuation failed")
-
- st, err = e.GetEvacuationState(context.Background())
- require.NoError(t, err, "get evacuation state failed")
- require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
+ require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
+ st = testWaitForEvacuationCompleted(t, e)
require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count")
require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count")
require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
@@ -528,9 +597,10 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ var mutex sync.Mutex
evacuatedTreeOps := make(map[string][]*pilorama.Move)
var prm EvacuateShardPrm
prm.ShardID = ids
@@ -545,7 +615,9 @@ func TestEvacuateTreesRemote(t *testing.T) {
if op.Time == 0 {
return true, "", nil
}
+ mutex.Lock()
evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
+ mutex.Unlock()
height = op.Time + 1
}
}
@@ -564,15 +636,9 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
- res, err := e.Evacuate(context.Background(), prm)
- require.NotNil(t, res, "sync evacuation must return not nil")
- require.NoError(t, err, "evacuation failed")
+ require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
+ st = testWaitForEvacuationCompleted(t, e)
- st, err = e.GetEvacuationState(context.Background())
- require.NoError(t, err, "get evacuation state failed")
- require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
-
- require.NoError(t, err, "get final state failed")
require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count")
require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count")
require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
@@ -605,3 +671,157 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.Equal(t, expectedTreeOps, evacuatedTreeOps)
}
+
+func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ // Create container with policy REP 2 REP 1
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ x1 = netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(1)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("cnr", "cnr1")
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+
+ // Create container with policy REP 1 REP 1
+ cnr2 := container.Container{}
+ p2 := netmap.PlacementPolicy{}
+ p2.SetContainerBackupFactor(1)
+ x2 := netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ x2 = netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ cnr2.SetPlacementPolicy(p2)
+ cnr2.SetAttribute("cnr", "cnr2")
+
+ var idCnr2 cid.ID
+ container.CalculateID(&idCnr2, cnr2)
+ cnrmap[idCnr2] = &cnr2
+ cids = append(cids, idCnr2)
+
+ // Create container to simulate removal: it is not added to the container source
+ cnr3 := container.Container{}
+ p3 := netmap.PlacementPolicy{}
+ p3.SetContainerBackupFactor(1)
+ x3 := netmap.ReplicaDescriptor{}
+ x3.SetNumberOfObjects(1)
+ p3.AddReplicas(x3)
+ cnr3.SetPlacementPolicy(p3)
+ cnr3.SetAttribute("cnr", "cnr3")
+
+ var idCnr3 cid.ID
+ container.CalculateID(&idCnr3, cnr3)
+ cids = append(cids, idCnr3)
+
+ e.SetContainerSource(&containerStorage{cntmap: cnrmap})
+
+ for _, sh := range ids {
+ for j := range 3 {
+ for range 4 {
+ obj := testutil.GenerateObjectWithCID(cids[j])
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+ }
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, "", st.ErrorMessage())
+ require.Equal(t, uint64(4), st.ObjectsEvacuated())
+ require.Equal(t, uint64(8), st.ObjectsSkipped())
+ require.Equal(t, uint64(0), st.ObjectsFailed())
+}
+
+func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
+ t.Skip()
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ // Create containers with policy REP 2, so RepOneOnly evacuation skips them
+ for i := range 10_000 {
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("i", strconv.Itoa(i))
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+ }
+
+ e.SetContainerSource(&containerStorage{
+ cntmap: cnrmap,
+ latency: time.Millisecond * 100,
+ })
+
+ for _, cnt := range cids {
+ for range 1 {
+ obj := testutil.GenerateObjectWithCID(cnt)
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+ prm.ContainerWorkerCount = 10
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ start := time.Now()
+ err := e.Evacuate(context.Background(), prm)
+ testWaitForEvacuationCompleted(t, e)
+ t.Logf("evacuate took %v\n", time.Since(start))
+ require.NoError(t, err)
+}
+
+func (l *evacuationLimiter) waitForCompleted() {
+ l.guard.Lock()
+ defer l.guard.Unlock()
+
+ for l.state.processState != EvacuateProcessStateCompleted {
+ l.statusCond.Wait()
+ }
+}
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
index d98101306..7dac9eb97 100644
--- a/pkg/local_object_storage/engine/exists.go
+++ b/pkg/local_object_storage/engine/exists.go
@@ -18,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
exists := false
locked := false
- e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) {
+ if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Exists(ctx, shPrm)
if err != nil {
if client.IsErrObjectAlreadyRemoved(err) {
@@ -37,7 +37,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
}
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
+ e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
}
return false
}
@@ -50,7 +50,9 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
}
return false
- })
+ }); err != nil {
+ return false, false, err
+ }
if alreadyRemoved {
return false, false, new(apistatus.ObjectAlreadyRemoved)
diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go
new file mode 100644
index 000000000..9b3c0833f
--- /dev/null
+++ b/pkg/local_object_storage/engine/exists_test.go
@@ -0,0 +1,51 @@
+package engine
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkExists(b *testing.B) {
+ b.Run("2 shards", func(b *testing.B) {
+ benchmarkExists(b, 2)
+ })
+ b.Run("4 shards", func(b *testing.B) {
+ benchmarkExists(b, 4)
+ })
+ b.Run("8 shards", func(b *testing.B) {
+ benchmarkExists(b, 8)
+ })
+}
+
+func benchmarkExists(b *testing.B, shardNum int) {
+ e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine
+ defer func() { require.NoError(b, e.Close(context.Background())) }()
+
+ addr := oidtest.Address()
+ for range 100 {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ err := Put(context.Background(), e, obj, false)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ var shPrm shard.ExistsPrm
+ shPrm.Address = addr
+ shPrm.ECParentAddress = oid.Address{}
+ ok, _, err := e.exists(context.Background(), shPrm)
+ if err != nil || ok {
+ b.Fatalf("%t %v", ok, err)
+ }
+ }
+}
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 253256c34..0694c53f3 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -56,6 +55,7 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
+ defer elapsed("Get", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
res, err = e.get(ctx, prm)
@@ -66,10 +66,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
- if e.metrics != nil {
- defer elapsed("Get", e.metrics.AddMethodDuration)()
- }
-
errNotFound := new(apistatus.ObjectNotFound)
var shPrm shard.GetPrm
@@ -82,7 +78,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
Engine: e,
}
- it.tryGetWithMeta(ctx)
+ if err := it.tryGetWithMeta(ctx); err != nil {
+ return GetRes{}, err
+ }
if it.SplitInfo != nil {
return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
@@ -101,17 +99,18 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
return GetRes{}, it.OutError
}
- it.tryGetFromBlobstore(ctx)
+ if err := it.tryGetFromBlobstore(ctx); err != nil {
+ return GetRes{}, err
+ }
if it.Object == nil {
return GetRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
- e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
+ e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
- zap.String("error", it.MetaError.Error()),
- zap.Stringer("address", prm.addr),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(it.MetaError),
+ zap.Stringer("address", prm.addr))
}
}
@@ -138,8 +137,8 @@ type getShardIterator struct {
ecInfoErr *objectSDK.ECInfoError
}
-func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
- i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error {
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.ShardPrm.SetIgnoreMeta(noMeta)
@@ -186,19 +185,19 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
i.ObjectExpired = true
return true
default:
- i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
return false
}
})
}
-func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) {
+func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
i.ShardPrm.SetIgnoreMeta(true)
- i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
if sh.GetMode().NoMetabase() {
// Already visited.
return false
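
Note: tryGetWithMeta and tryGetFromBlobstore now propagate the error from iterateOverSortedShards instead of discarding it, matching the exists and head call sites above: the callback's boolean still means "stop early", while a non-nil error from the iterator (apparently context cancellation) aborts the whole lookup. A compact sketch of that assumed contract (firstShardWith is illustrative, not part of the diff):

// The callback's bool stops the scan early; the returned error
// (e.g. ctx.Err()) aborts the operation instead of being swallowed.
func (e *StorageEngine) firstShardWith(ctx context.Context, addr oid.Address, pred func(hashedShard) bool) (bool, error) {
	var found bool
	err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
		if pred(sh) {
			found = true
			return true // early termination, not an error
		}
		return false
	})
	return found, err // non-nil only if the iteration itself failed
}
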
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index 6857a3631..d436dd411 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -68,9 +68,7 @@ func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err
func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head")
defer span.End()
- if e.metrics != nil {
- defer elapsed("Head", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Head", e.metrics.AddMethodDuration)()
var (
head *objectSDK.Object
@@ -84,7 +82,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
shPrm.SetAddress(prm.addr)
shPrm.SetRaw(prm.raw)
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
+ if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold
res, err := sh.Head(ctx, shPrm)
if err != nil {
@@ -119,13 +117,15 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
outError = new(apistatus.ObjectNotFound)
return true
default:
- e.reportShardError(sh, "could not head object from shard", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr))
return false
}
}
head = res.Object()
return true
- })
+ }); err != nil {
+ return HeadRes{}, err
+ }
if head != nil {
return HeadRes{head: head}, nil
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index 5afc50f07..f9db81f16 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -39,11 +39,11 @@ func TestHeadRaw(t *testing.T) {
link.SetSplitID(splitID)
t.Run("virtual object split in different shards", func(t *testing.T) {
- s1 := testNewShard(t)
- s2 := testNewShard(t)
+ te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+ e := te.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
- e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
- defer e.Close(context.Background())
+ s1, s2 := te.shards[0], te.shards[1]
var putPrmLeft shard.PutPrm
putPrmLeft.SetObject(child)
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 683713f94..e5f7072e2 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -27,9 +26,6 @@ type InhumePrm struct {
forceRemoval bool
}
-// InhumeRes encapsulates results of inhume operation.
-type InhumeRes struct{}
-
// WithTarget sets a list of objects that should be inhumed and tombstone address
// as the reason for inhume operation.
//
@@ -67,21 +63,20 @@ var errInhumeFailure = errors.New("inhume operation failed")
// with that object) if WithForceRemoval option has been provided.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
+func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
defer span.End()
+ defer elapsed("Inhume", e.metrics.AddMethodDuration)()
- err = e.execIfNotBlocked(func() error {
- res, err = e.inhume(ctx, prm)
- return err
+ return e.execIfNotBlocked(func() error {
+ return e.inhume(ctx, prm)
})
-
- return
}
-func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
- if e.metrics != nil {
- defer elapsed("Inhume", e.metrics.AddMethodDuration)()
+func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
+ addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
+ if err != nil {
+ return err
}
var shPrm shard.InhumePrm
@@ -89,105 +84,205 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
shPrm.ForceRemoval()
}
- for i := range prm.addrs {
- if !prm.forceRemoval {
- locked, err := e.IsLocked(ctx, prm.addrs[i])
- if err != nil {
- e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
- zap.Error(err),
- zap.Stringer("addr", prm.addrs[i]),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- } else if locked {
- return InhumeRes{}, new(apistatus.ObjectLocked)
- }
- }
-
+ for shardID, addrs := range addrsPerShard {
if prm.tombstone != nil {
- shPrm.SetTarget(*prm.tombstone, prm.addrs[i])
+ shPrm.SetTarget(*prm.tombstone, addrs...)
} else {
- shPrm.MarkAsGarbage(prm.addrs[i])
+ shPrm.MarkAsGarbage(addrs...)
}
- ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true)
- if err != nil {
- return InhumeRes{}, err
+ sh, exists := e.shards[shardID]
+ if !exists {
+ e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard,
+ zap.Error(errors.New("this shard was expected to exist")),
+ zap.String("shard_id", shardID),
+ )
+ return errInhumeFailure
}
- if !ok {
- ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false)
- if err != nil {
- return InhumeRes{}, err
- } else if !ok {
- return InhumeRes{}, errInhumeFailure
- }
+
+ if _, err := sh.Inhume(ctx, shPrm); err != nil {
+ e.reportInhumeError(ctx, err, sh)
+ return err
}
}
- return InhumeRes{}, nil
+ return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm)
}
-// Returns ok if object was inhumed during this invocation or before.
-func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
- root := false
- var existPrm shard.ExistsPrm
- var retErr error
- var ok bool
+func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) {
+ if err == nil {
+ return
+ }
- e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
- defer func() {
- // if object is root we continue since information about it
- // can be presented in other shards
- if checkExists && root {
- stop = false
- }
- }()
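+ // Locked objects, lock removal, and read-only/degraded shards are
+ // expected conditions; only unexpected errors are reported as shard
+ // faults below.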
+ var errLocked *apistatus.ObjectLocked
+ switch {
+ case errors.As(err, &errLocked):
+ case errors.Is(err, shard.ErrLockObjectRemoval):
+ case errors.Is(err, shard.ErrReadOnlyMode):
+ case errors.Is(err, shard.ErrDegradedMode):
+ default:
+ e.reportShardError(ctx, hs, "couldn't inhume object in shard", err)
+ }
+}
- if checkExists {
- existPrm.Address = addr
- exRes, err := sh.Exists(ctx, existPrm)
- if err != nil {
- if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) {
- // inhumed once - no need to be inhumed again
- ok = true
- return true
- }
+// inhumeNotFoundObjects removes objects which are not found on any shard.
+//
+// Even if an object is not found on any shard, it is still important to
+// "remove" it in order to populate the metabase indexes, because they are
+// responsible for reporting the correct object status: without the indexes
+// the status would be `object not found`, with them it is `object is already
+// removed`.
+//
+// Those objects are removed evenly across the shards, with a per-shard batch
+// size equal to 1 + floor(number of objects / number of shards).
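+// For example, inhuming 10 such objects across 4 shards gives a batch size
+// of 1 + floor(10/4) = 3, so the addresses are consumed in batches of
+// 3, 3, 3, and 1 by the first four shards visited.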
+func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error {
+ if len(addrs) == 0 {
+ return nil
+ }
- var siErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
- if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) {
- e.reportShardError(sh, "could not check for presents in shard", err, zap.Stringer("address", addr))
- return
- }
+ var shPrm shard.InhumePrm
+ if prm.forceRemoval {
+ shPrm.ForceRemoval()
+ }
- root = true
- } else if !exRes.Exists() {
- return
- }
+ numObjectsPerShard := 1 + len(addrs)/len(e.shards)
+
+ var inhumeErr error
+ itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
+ numObjects := min(numObjectsPerShard, len(addrs))
+
+ if numObjects == 0 {
+ return true
}
- _, err := sh.Inhume(ctx, prm)
+ if prm.tombstone != nil {
+ shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...)
+ } else {
+ shPrm.MarkAsGarbage(addrs[:numObjects]...)
+ }
+ addrs = addrs[numObjects:]
+
+ _, inhumeErr = hs.Inhume(ctx, shPrm)
+ e.reportInhumeError(ctx, inhumeErr, hs)
+ return inhumeErr != nil
+ })
+ if inhumeErr != nil {
+ return inhumeErr
+ }
+ return itErr
+}
+
+// groupObjectsByShard groups objects based on the shard(s) they are stored on.
+//
+// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
+// the objects are locked.
+//
+// Returns two sets of objects: found objects, which are grouped per shard,
+// and not found objects. The latter are objects which are not found on any
+// shard. This can happen if a node is a container node but doesn't participate
+// in a replica group of the object.
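+//
+// For example, a root object stored on two shards contributes its address to
+// both shards' groups, while an address found on no shard ends up among the
+// not found objects.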
+func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) {
+ groups = make(map[string][]oid.Address)
+
+ var ids []string
+ for _, addr := range addrs {
+ ids, err = e.findShards(ctx, addr, checkLocked)
if err != nil {
- var errLocked *apistatus.ObjectLocked
- switch {
- case errors.As(err, &errLocked):
+ return
+ }
+
+ if len(ids) == 0 {
+ notFoundObjects = append(notFoundObjects, addr)
+ continue
+ }
+
+ for _, id := range ids {
+ groups[id] = append(groups[id], addr)
+ }
+ }
+
+ return
+}
+
+// findShards determines the shard(s) where the object is stored.
+//
+// If the object is a root object, multiple shards will be returned.
+//
+// If checkLocked is set, [apistatus.ObjectLocked] will be returned if the
+// object is locked.
+func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) {
+ var (
+ ids []string
+ retErr error
+
+ prm shard.ExistsPrm
+
+ siErr *objectSDK.SplitInfoError
+ ecErr *objectSDK.ECInfoError
+
+ isRootObject bool
+ objectExists bool
+ )
+
+ if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
+ objectExists = false
+
+ prm.Address = addr
+ switch res, err := sh.Exists(ctx, prm); {
+ case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err):
+ // NOTE(@a-savchuk): it was considered that we could stop immediately
+ // once the object is already removed or expired. However, the
+ // previous method behavior was:
+ // - keep iterating if it's a root object and already removed,
+ // - stop iterating if it's not a root object and removed.
+ //
+ // Since this change only aims at improving method speed, the previous
+ // behavior is kept. Continue if it's a root object.
+ return !isRootObject
+ case errors.As(err, &siErr) || errors.As(err, &ecErr):
+ isRootObject = true
+ objectExists = true
+ case err != nil:
+ e.reportShardError(
+ ctx, sh, "couldn't check for presence in shard",
+ err, zap.Stringer("address", addr),
+ )
+ case res.Exists():
+ objectExists = true
+ default:
+ }
+
+ if checkLocked {
+ if isLocked, err := sh.IsLocked(ctx, addr); err != nil {
+ e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
+ zap.Error(err),
+ zap.Stringer("address", addr),
+ )
+ } else if isLocked {
retErr = new(apistatus.ObjectLocked)
return true
- case errors.Is(err, shard.ErrLockObjectRemoval):
- retErr = meta.ErrLockObjectRemoval
- return true
- case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode):
- retErr = err
- return true
}
-
- e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", addr))
- return false
}
- ok = true
- return true
- })
+ // This exit point must come after checking if the object is locked,
+ // since the locked index may be populated even if the object doesn't
+ // exist.
+ if !objectExists {
+ return
+ }
- return ok, retErr
+ ids = append(ids, sh.ID().String())
+
+ // Continue if it's a root object.
+ return !isRootObject
+ }); err != nil {
+ return nil, err
+ }
+
+ if retErr != nil {
+ return nil, retErr
+ }
+ return ids, nil
}
// IsLocked checks whether an object is locked according to StorageEngine's state.
@@ -202,17 +297,18 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
var err error
var outErr error
- e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
- locked, err = h.Shard.IsLocked(ctx, addr)
+ if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
+ locked, err = h.IsLocked(ctx, addr)
if err != nil {
- e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("address", addr),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
outErr = err
return false
}
return locked
- })
+ }); err != nil {
+ return false, err
+ }
if locked {
return locked, nil
@@ -221,94 +317,99 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
return locked, outErr
}
-// GetLocked return lock id's if object is locked according to StorageEngine's state.
-func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocked",
+// GetLocks returns the lock IDs if the object is locked according to the StorageEngine's state.
+func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
defer span.End()
- var locked []oid.ID
+ var allLocks []oid.ID
var outErr error
- e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
- ld, err := h.Shard.GetLocked(ctx, addr)
+ if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
+ locks, err := h.GetLocks(ctx, addr)
if err != nil {
- e.reportShardError(h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr))
outErr = err
}
- locked = append(locked, ld...)
+ allLocks = append(allLocks, locks...)
return false
- })
- if len(locked) > 0 {
- return locked, nil
+ }); err != nil {
+ return nil, err
}
- return locked, outErr
+ if len(allLocks) > 0 {
+ return allLocks, nil
+ }
+ return allLocks, outErr
}
func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
sh.HandleExpiredTombstones(ctx, addrs)
select {
case <-ctx.Done():
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err()))
return true
default:
return false
}
- })
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err))
+ }
}
func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
sh.HandleExpiredLocks(ctx, epoch, lockers)
select {
case <-ctx.Done():
- e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
return true
default:
return false
}
- })
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err))
+ }
}
func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) {
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- sh.HandleDeletedLocks(lockers)
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ sh.HandleDeletedLocks(ctx, lockers)
select {
case <-ctx.Done():
- e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
return true
default:
return false
}
- })
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err))
+ }
}
func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) {
if len(ids) == 0 {
return
}
-
idMap, err := e.selectNonExistentIDs(ctx, ids)
if err != nil {
return
}
-
if len(idMap) == 0 {
return
}
-
var failed bool
var prm shard.ContainerSizePrm
- e.iterateOverUnsortedShards(func(sh hashedShard) bool {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -317,9 +418,9 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
var drop []cid.ID
for id := range idMap {
prm.SetContainerID(id)
- s, err := sh.ContainerSize(prm)
+ s, err := sh.ContainerSize(ctx, prm)
if err != nil {
- e.log.Warn(logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
@@ -332,16 +433,18 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
}
return len(idMap) == 0
- })
-
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
+ return
+ }
if failed || len(idMap) == 0 {
return
}
- e.iterateOverUnsortedShards(func(sh hashedShard) bool {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -349,19 +452,20 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
for id := range idMap {
if err := sh.DeleteContainerSize(ctx, id); err != nil {
- e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
}
return false
- })
-
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
+ return
+ }
if failed {
return
}
-
for id := range idMap {
e.metrics.DeleteContainerSize(id.EncodeToString())
}
@@ -371,22 +475,19 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
if len(ids) == 0 {
return
}
-
idMap, err := e.selectNonExistentIDs(ctx, ids)
if err != nil {
return
}
-
if len(idMap) == 0 {
return
}
-
var failed bool
var prm shard.ContainerCountPrm
- e.iterateOverUnsortedShards(func(sh hashedShard) bool {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -397,7 +498,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
prm.ContainerID = id
s, err := sh.ContainerCount(ctx, prm)
if err != nil {
- e.log.Warn(logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
@@ -410,16 +511,18 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
}
return len(idMap) == 0
- })
-
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
+ return
+ }
if failed || len(idMap) == 0 {
return
}
- e.iterateOverUnsortedShards(func(sh hashedShard) bool {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -427,19 +530,20 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
for id := range idMap {
if err := sh.DeleteContainerCount(ctx, id); err != nil {
- e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
}
return false
- })
-
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
+ return
+ }
if failed {
return
}
-
for id := range idMap {
e.metrics.DeleteContainerCount(id.EncodeToString())
}
@@ -452,7 +556,7 @@ func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID)
for _, id := range ids {
isAvailable, err := cs.IsContainerAvailable(ctx, id)
if err != nil {
- e.log.Warn(logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
return nil, err
}
if isAvailable {
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 9daa113f8..0e268cd23 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -2,14 +2,24 @@ package engine
import (
"context"
+ "fmt"
+ "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
)
func TestStorageEngine_Inhume(t *testing.T) {
@@ -37,30 +47,31 @@ func TestStorageEngine_Inhume(t *testing.T) {
t.Run("delete small object", func(t *testing.T) {
t.Parallel()
- e := testNewEngine(t).setShardsNum(t, 1).engine
- defer e.Close(context.Background())
+ e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
- err := Put(context.Background(), e, parent)
+ err := Put(context.Background(), e, parent, false)
require.NoError(t, err)
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
t.Run("delete big object", func(t *testing.T) {
t.Parallel()
- s1 := testNewShard(t)
- s2 := testNewShard(t)
- e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
- defer e.Close(context.Background())
+ te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+ e := te.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
+
+ s1, s2 := te.shards[0], te.shards[1]
var putChild shard.PutPrm
putChild.SetObject(child)
@@ -75,11 +86,257 @@ func TestStorageEngine_Inhume(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
}
+
+func TestStorageEngine_ECInhume(t *testing.T) {
+ parentObjectAddress := oidtest.Address()
+ containerID := parentObjectAddress.Container()
+
+ chunkObject0 := testutil.GenerateObjectWithCID(containerID)
+ chunkObject0.SetECHeader(objectSDK.NewECHeader(
+ objectSDK.ECParentInfo{
+ ID: parentObjectAddress.Object(),
+ }, 0, 4, []byte{}, 0))
+
+ chunkObject1 := testutil.GenerateObjectWithCID(containerID)
+ chunkObject1.SetECHeader(objectSDK.NewECHeader(
+ objectSDK.ECParentInfo{
+ ID: parentObjectAddress.Object(),
+ }, 1, 4, []byte{}, 0))
+
+ tombstone := objectSDK.NewTombstone()
+ tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()})
+ payload, err := tombstone.Marshal()
+ require.NoError(t, err)
+ tombstoneObject := testutil.GenerateObjectWithCID(containerID)
+ tombstoneObject.SetType(objectSDK.TypeTombstone)
+ tombstoneObject.SetPayload(payload)
+ tombstoneObjectAddress := object.AddressOf(tombstoneObject)
+
+ e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
+
+ require.NoError(t, Put(context.Background(), e, chunkObject0, false))
+
+ require.NoError(t, Put(context.Background(), e, tombstoneObject, false))
+
+ var inhumePrm InhumePrm
+ inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress)
+ err = e.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+
+ var alreadyRemoved *apistatus.ObjectAlreadyRemoved
+
+ require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved)
+
+ require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved)
+}
+
+func TestInhumeExpiredRegularObject(t *testing.T) {
+ t.Parallel()
+
+ const currEpoch = 42
+ const objectExpiresAfter = currEpoch - 1
+
+ engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+ return []shard.Option{
+ shard.WithDisabledGC(),
+ shard.WithMetaBaseOptions(append(
+ testGetDefaultMetabaseOptions(t),
+ meta.WithEpochState(epochState{currEpoch}),
+ )...),
+ }
+ }).prepare(t).engine
+
+ cnr := cidtest.ID()
+
+ generateAndPutObject := func() *objectSDK.Object {
+ obj := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
+
+ var putPrm PutPrm
+ putPrm.Object = obj
+ require.NoError(t, engine.Put(context.Background(), putPrm))
+ return obj
+ }
+
+ t.Run("inhume with tombstone", func(t *testing.T) {
+ obj := generateAndPutObject()
+ ts := oidtest.Address()
+ ts.SetContainer(cnr)
+
+ var prm InhumePrm
+ prm.WithTarget(ts, object.AddressOf(obj))
+ err := engine.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ })
+
+ t.Run("inhume without tombstone", func(t *testing.T) {
+ obj := generateAndPutObject()
+
+ var prm InhumePrm
+ prm.MarkAsGarbage(object.AddressOf(obj))
+ err := engine.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ })
+}
+
+func BenchmarkInhumeMultipart(b *testing.B) {
+ // The benchmark result depends only insignificantly on the number of
+ // shards, so it is not used as a benchmark parameter; it is simply set
+ // big enough.
+ numShards := 100
+
+ for numObjects := 1; numObjects <= 10000; numObjects *= 10 {
+ b.Run(
+ fmt.Sprintf("objects=%d", numObjects),
+ func(b *testing.B) {
+ benchmarkInhumeMultipart(b, numShards, numObjects)
+ },
+ )
+ }
+}
+
+func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
+ b.StopTimer()
+
+ engine := testNewEngine(b).
+ setShardsNum(b, numShards).prepare(b).engine
+ defer func() { require.NoError(b, engine.Close(context.Background())) }()
+
+ cnt := cidtest.ID()
+ eg := errgroup.Group{}
+
+ for range b.N {
+ addrs := make([]oid.Address, numObjects)
+
+ for i := range numObjects {
+ prm := PutPrm{}
+
+ prm.Object = objecttest.Object().Parent()
+ prm.Object.SetContainerID(cnt)
+ prm.Object.SetType(objectSDK.TypeRegular)
+
+ addrs[i] = object.AddressOf(prm.Object)
+
+ eg.Go(func() error {
+ return engine.Put(context.Background(), prm)
+ })
+ }
+ require.NoError(b, eg.Wait())
+
+ ts := oidtest.Address()
+ ts.SetContainer(cnt)
+
+ prm := InhumePrm{}
+ prm.WithTarget(ts, addrs...)
+
+ b.StartTimer()
+ err := engine.Inhume(context.Background(), prm)
+ require.NoError(b, err)
+ b.StopTimer()
+ }
+}
+
+func TestInhumeIfObjectDoesntExist(t *testing.T) {
+ const numShards = 4
+
+ engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine
+ t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) })
+
+ t.Run("inhume without tombstone", func(t *testing.T) {
+ testInhumeIfObjectDoesntExist(t, engine, false, false)
+ })
+ t.Run("inhume with tombstone", func(t *testing.T) {
+ testInhumeIfObjectDoesntExist(t, engine, true, false)
+ })
+ t.Run("force inhume", func(t *testing.T) {
+ testInhumeIfObjectDoesntExist(t, engine, false, true)
+ })
+
+ t.Run("object is locked", func(t *testing.T) {
+ t.Run("inhume without tombstone", func(t *testing.T) {
+ testInhumeLockedIfObjectDoesntExist(t, engine, false, false)
+ })
+ t.Run("inhume with tombstone", func(t *testing.T) {
+ testInhumeLockedIfObjectDoesntExist(t, engine, true, false)
+ })
+ t.Run("force inhume", func(t *testing.T) {
+ testInhumeLockedIfObjectDoesntExist(t, engine, false, true)
+ })
+ })
+}
+
+func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
+ t.Parallel()
+
+ object := oidtest.Address()
+ require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce))
+
+ err := testHeadObject(e, object)
+ if withTombstone {
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
+ } else {
+ require.True(t, client.IsErrObjectNotFound(err))
+ }
+}
+
+func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
+ t.Parallel()
+
+ object := oidtest.Address()
+ require.NoError(t, testLockObject(e, object))
+
+ err := testInhumeObject(t, e, object, withTombstone, withForce)
+ if !withForce {
+ var errLocked *apistatus.ObjectLocked
+ require.ErrorAs(t, err, &errLocked)
+ return
+ }
+ require.NoError(t, err)
+
+ err = testHeadObject(e, object)
+ if withTombstone {
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
+ } else {
+ require.True(t, client.IsErrObjectNotFound(err))
+ }
+}
+
+func testLockObject(e *StorageEngine, obj oid.Address) error {
+ return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()})
+}
+
+func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error {
+ tombstone := oidtest.Address()
+ tombstone.SetContainer(obj.Container())
+
+ // Due to the test design it is possible to set both options; however,
+ // removal with a tombstone and force removal are mutually exclusive.
+ require.False(t, withTombstone && withForce)
+
+ var inhumePrm InhumePrm
+ if withTombstone {
+ inhumePrm.WithTarget(tombstone, obj)
+ } else {
+ inhumePrm.MarkAsGarbage(obj)
+ }
+ if withForce {
+ inhumePrm.WithForceRemoval()
+ }
+ return e.Inhume(context.Background(), inhumePrm)
+}
+
+func testHeadObject(e *StorageEngine, obj oid.Address) error {
+ var headPrm HeadPrm
+ headPrm.WithAddress(obj)
+
+ _, err := e.Head(context.Background(), headPrm)
+ return err
+}
diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go
index cb3830b7c..073248862 100644
--- a/pkg/local_object_storage/engine/list.go
+++ b/pkg/local_object_storage/engine/list.go
@@ -7,6 +7,7 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
)
// ErrEndOfListing is returned from an object listing with cursor
@@ -98,6 +99,10 @@ func (l ListWithCursorRes) Cursor() *Cursor {
// Returns ErrEndOfListing if there are no more objects to return or count
// parameter set to zero.
func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.ListWithCursor")
+ defer span.End()
+ defer elapsed("ListWithCursor", e.metrics.AddMethodDuration)()
+
result := make([]objectcore.Info, 0, prm.count)
// Set initial cursors
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index 11a6c7841..6cfa546f8 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -68,10 +68,7 @@ func TestListWithCursor(t *testing.T) {
meta.WithEpochState(epochState{}),
),
}
- }).engine
- require.NoError(t, e.Open(context.Background()))
- require.NoError(t, e.Init(context.Background()))
-
+ }).prepare(t).engine
defer func() {
require.NoError(t, e.Close(context.Background()))
}()
@@ -82,11 +79,7 @@ func TestListWithCursor(t *testing.T) {
for range tt.objectNum {
containerID := cidtest.ID()
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
-
- var prm PutPrm
- prm.WithObject(obj)
-
- err := e.Put(context.Background(), prm)
+ err := e.Put(context.Background(), PutPrm{Object: obj})
require.NoError(t, err)
expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index ac8fa9c6f..3b0cf74f9 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -32,6 +32,7 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
attribute.Int("locked_count", len(locked)),
))
defer span.End()
+ defer elapsed("Lock", e.metrics.AddMethodDuration)()
return e.execIfNotBlocked(func() error {
return e.lock(ctx, idCnr, locker, locked)
@@ -40,11 +41,19 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
for i := range locked {
- switch e.lockSingle(ctx, idCnr, locker, locked[i], true) {
+ st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true)
+ if err != nil {
+ return err
+ }
+ switch st {
case 1:
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
- switch e.lockSingle(ctx, idCnr, locker, locked[i], false) {
+ st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false)
+ if err != nil {
+ return err
+ }
+ switch st {
case 1:
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
@@ -60,13 +69,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
// - 0: fail
// - 1: locking irregular object
// - 2: ok
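+// A non-nil retErr means the shard iteration itself failed.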
-func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
+func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
var addrLocked oid.Address
addrLocked.SetContainer(idCnr)
addrLocked.SetObject(locked)
- e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) {
+ retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) {
defer func() {
// if object is root we continue since information about it
// can be presented in other shards
@@ -83,20 +92,14 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
var siErr *objectSDK.SplitInfoError
var eiErr *objectSDK.ECInfoError
if errors.As(err, &eiErr) {
- eclocked := []oid.ID{locked}
- for _, chunk := range eiErr.ECInfo().Chunks {
- var objID oid.ID
- err = objID.ReadFromV2(chunk.ID)
- if err != nil {
- e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
- zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
- return false
- }
- eclocked = append(eclocked, objID)
+ eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr)
+ if !ok {
+ return false
}
+
err = sh.Lock(ctx, idCnr, locker, eclocked)
if err != nil {
- e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return false
}
@@ -108,7 +111,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
// do not lock it
return true
}
- e.reportShardError(sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
+ e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return
}
@@ -121,7 +124,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
- e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
var errIrregular *apistatus.LockNonRegularObject
@@ -136,3 +139,18 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
})
return
}
+
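+// checkECLocked resolves the IDs of all EC chunks that must be locked
+// together with the requested object. It reports false if any chunk ID
+// cannot be decoded.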
+func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) {
+ eclocked := []oid.ID{locked}
+ for _, chunk := range eiErr.ECInfo().Chunks {
+ var objID oid.ID
+ err := objID.ReadFromV2(chunk.ID)
+ if err != nil {
+ e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
+ return nil, false
+ }
+ eclocked = append(eclocked, objID)
+ }
+ return eclocked, true
+}
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 7fa7c27ef..b8c9d6b1d 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -6,12 +6,12 @@ import (
"testing"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -57,11 +57,9 @@ func TestLockUserScenario(t *testing.T) {
}),
shard.WithTombstoneSource(tss{lockerExpiresAfter}),
}
- })
+ }).
+ prepare(t)
e := testEngine.engine
- require.NoError(t, e.Open(context.Background()))
- require.NoError(t, e.Init(context.Background()))
-
defer func() { require.NoError(t, e.Close(context.Background())) }()
lockerID := oidtest.ID()
@@ -97,7 +95,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
@@ -105,7 +103,7 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
objectSDK.WriteLock(lockerObj, locker)
- err = Put(context.Background(), e, lockerObj)
+ err = Put(context.Background(), e, lockerObj, false)
require.NoError(t, err)
err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
@@ -116,7 +114,7 @@ func TestLockUserScenario(t *testing.T) {
inhumePrm.WithTarget(tombAddr, objAddr)
var objLockedErr *apistatus.ObjectLocked
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 4.
@@ -124,12 +122,12 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
- err = Put(context.Background(), e, tombObj)
+ err = Put(context.Background(), e, tombObj, false)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.ErrorIs(t, err, meta.ErrLockObjectRemoval)
// 5.
@@ -138,7 +136,7 @@ func TestLockUserScenario(t *testing.T) {
inhumePrm.WithTarget(tombAddr, objAddr)
require.Eventually(t, func() bool {
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
return err == nil
}, 30*time.Second, time.Second)
}
@@ -162,11 +160,9 @@ func TestLockExpiration(t *testing.T) {
return pool
}),
}
- })
+ }).
+ prepare(t)
e := testEngine.engine
- require.NoError(t, e.Open(context.Background()))
- require.NoError(t, e.Init(context.Background()))
-
defer func() { require.NoError(t, e.Close(context.Background())) }()
const lockerExpiresAfter = 13
@@ -177,7 +173,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
@@ -189,7 +185,7 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(objectSDK.TypeLock)
lock.SetAttributes(a)
- err = Put(context.Background(), e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
@@ -199,20 +195,24 @@ func TestLockExpiration(t *testing.T) {
require.NoError(t, err)
var inhumePrm InhumePrm
- inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 3.
e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
// 4.
- inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
require.Eventually(t, func() bool {
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
return err == nil
}, 30*time.Second, time.Second)
}
@@ -239,9 +239,8 @@ func TestLockForceRemoval(t *testing.T) {
}),
shard.WithDeletedLockCallback(e.processDeletedLocks),
}
- }).engine
- require.NoError(t, e.Open(context.Background()))
- require.NoError(t, e.Init(context.Background()))
+ }).
+ prepare(t).engine
defer func() { require.NoError(t, e.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -250,14 +249,14 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
- err = Put(context.Background(), e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
@@ -271,12 +270,12 @@ func TestLockForceRemoval(t *testing.T) {
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 4.
@@ -284,12 +283,64 @@ func TestLockForceRemoval(t *testing.T) {
deletePrm.WithAddress(objectcore.AddressOf(lock))
deletePrm.WithForceRemoval()
- _, err = e.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
+ require.NoError(t, e.Delete(context.Background(), deletePrm))
// 5.
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = e.Inhume(context.Background(), inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
}
+
+func TestLockExpiredRegularObject(t *testing.T) {
+ const currEpoch = 42
+ const objectExpiresAfter = currEpoch - 1
+
+ engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+ return []shard.Option{
+ shard.WithDisabledGC(),
+ shard.WithMetaBaseOptions(append(
+ testGetDefaultMetabaseOptions(t),
+ meta.WithEpochState(epochState{currEpoch}),
+ )...),
+ }
+ }).prepare(t).engine
+
+ cnr := cidtest.ID()
+
+ object := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
+
+ address := objectcore.AddressOf(object)
+
+ var putPrm PutPrm
+ putPrm.Object = object
+ require.NoError(t, engine.Put(context.Background(), putPrm))
+
+ var getPrm GetPrm
+ var errNotFound *apistatus.ObjectNotFound
+
+ getPrm.WithAddress(address)
+ _, err := engine.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, &errNotFound)
+
+ t.Run("lock expired regular object", func(t *testing.T) {
+ require.NoError(t, engine.Lock(context.Background(),
+ address.Container(),
+ oidtest.ID(),
+ []oid.ID{address.Object()},
+ ))
+
+ res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object))
+ require.NoError(t, err)
+ require.True(t, res)
+ })
+
+ t.Run("get expired and locked regular object", func(t *testing.T) {
+ getPrm.WithAddress(objectcore.AddressOf(object))
+
+ res, err := engine.Get(context.Background(), getPrm)
+ require.NoError(t, err)
+ require.Equal(t, res.Object(), object)
+ })
+}
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 1c088c754..963292d83 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -7,34 +7,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
-type MetricRegister interface {
- AddMethodDuration(method string, d time.Duration)
-
- SetObjectCounter(shardID, objectType string, v uint64)
- AddToObjectCounter(shardID, objectType string, delta int)
-
- SetMode(shardID string, mode mode.Mode)
-
- AddToContainerSize(cnrID string, size int64)
- DeleteContainerSize(cnrID string)
- DeleteContainerCount(cnrID string)
- AddToPayloadCounter(shardID string, size int64)
- IncErrorCounter(shardID string)
- ClearErrorCounter(shardID string)
- DeleteShardMetrics(shardID string)
-
- SetContainerObjectCounter(shardID, contID, objectType string, v uint64)
- IncContainerObjectCounter(shardID, contID, objectType string)
- SubContainerObjectCounter(shardID, contID, objectType string, v uint64)
-
- IncRefillObjectsCount(shardID, path string, size int, success bool)
- SetRefillPercent(shardID, path string, percent uint32)
- SetRefillStatus(shardID, path, status string)
- SetEvacuationInProgress(shardID string, value bool)
-
- WriteCache() metrics.WriteCacheMetrics
- GC() metrics.GCMetrics
-}
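+// MetricRegister and the related metric types alias the shared definitions
+// from the metrics package, so the interface is maintained in one place.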
+type (
+ MetricRegister = metrics.EngineMetrics
+ GCMetrics = metrics.GCMetrics
+ WriteCacheMetrics = metrics.WriteCacheMetrics
+ NullBool = metrics.NullBool
+)
func elapsed(method string, addFunc func(method string, d time.Duration)) func() {
t := time.Now()
@@ -68,3 +46,48 @@ func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success
func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) {
m.storage.AddInhumedObjectCount(m.shardID, count, objectType)
}
+
+type (
+ noopMetrics struct{}
+ noopWriteCacheMetrics struct{}
+ noopGCMetrics struct{}
+)
+
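+// Compile-time checks that the no-op implementations satisfy the interfaces.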
+var (
+ _ MetricRegister = noopMetrics{}
+ _ WriteCacheMetrics = noopWriteCacheMetrics{}
+ _ GCMetrics = noopGCMetrics{}
+)
+
+func (noopMetrics) AddMethodDuration(string, time.Duration) {}
+func (noopMetrics) SetObjectCounter(string, string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, string, int) {}
+func (noopMetrics) SetMode(string, mode.Mode) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) DeleteContainerSize(string) {}
+func (noopMetrics) DeleteContainerCount(string) {}
+func (noopMetrics) AddToPayloadCounter(string, int64) {}
+func (noopMetrics) IncErrorCounter(string) {}
+func (noopMetrics) ClearErrorCounter(string) {}
+func (noopMetrics) DeleteShardMetrics(string) {}
+func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncContainerObjectCounter(string, string, string) {}
+func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string, string) {}
+func (noopMetrics) SetEvacuationInProgress(string, bool) {}
+func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} }
+func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} }
+
+func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {}
+func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetMode(string, string) {}
+func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {}
+func (noopWriteCacheMetrics) Close(string, string) {}
+
+func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {}
+func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {}
+func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
+func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {}
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index f92d83745..10cf5ffd5 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -9,8 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -22,7 +20,8 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
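+ // Object is the object to save. Required.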
+ Object *objectSDK.Object
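+ // IsIndexedContainer controls whether the object's attributes are indexed
+ // when it is written to a shard.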
+ IsIndexedContainer bool
}
var errPutShard = errors.New("could not put object to any shard")
@@ -41,13 +40,6 @@ type putToShardRes struct {
err error
}
-// WithObject is a Put option to set object to save.
-//
-// Option is required.
-func (p *PutPrm) WithObject(obj *objectSDK.Object) {
- p.obj = obj
-}
-
// Put saves the object to local storage.
//
// Returns any error encountered that
@@ -59,9 +51,10 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) {
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
trace.WithAttributes(
- attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
+ attribute.String("address", object.AddressOf(prm.Object).EncodeToString()),
))
defer span.End()
+ defer elapsed("Put", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
err = e.put(ctx, prm)
@@ -72,29 +65,25 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
}
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
- if e.metrics != nil {
- defer elapsed("Put", e.metrics.AddMethodDuration)()
- }
-
- addr := object.AddressOf(prm.obj)
+ addr := object.AddressOf(prm.Object)
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
- var parent oid.Address
- if prm.obj.ECHeader() != nil {
- parent.SetObject(prm.obj.ECHeader().Parent())
- parent.SetContainer(addr.Container())
+ var ecParent oid.Address
+ if prm.Object.ECHeader() != nil {
+ ecParent.SetObject(prm.Object.ECHeader().Parent())
+ ecParent.SetContainer(addr.Container())
}
var shPrm shard.ExistsPrm
shPrm.Address = addr
- shPrm.ParentAddress = parent
+ shPrm.ECParentAddress = ecParent
existed, locked, err := e.exists(ctx, shPrm)
if err != nil {
return err
}
if !existed && locked {
- lockers, err := e.GetLocked(ctx, parent)
+ lockers, err := e.GetLocks(ctx, ecParent)
if err != nil {
return err
}
@@ -107,17 +96,19 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
}
var shRes putToShardRes
- e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
+ if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
e.mtx.RLock()
- pool, ok := e.shardPools[sh.ID().String()]
+ _, ok := e.shards[sh.ID().String()]
e.mtx.RUnlock()
if !ok {
// Shard was concurrently removed, skip.
return false
}
- shRes = e.putToShard(ctx, sh, pool, addr, prm.obj)
+ shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer)
return shRes.status != putToShardUnknown
- })
+ }); err != nil {
+ return err
+ }
switch shRes.status {
case putToShardUnknown:
return errPutShard
@@ -132,80 +123,64 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// putToShard puts object to sh.
// Return putToShardStatus and error if it is necessary to propagate an error upper.
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool,
- addr oid.Address, obj *objectSDK.Object,
+func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard,
+ addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool,
) (res putToShardRes) {
- exitCh := make(chan struct{})
+ var existPrm shard.ExistsPrm
+ existPrm.Address = addr
- if err := pool.Submit(func() {
- defer close(exitCh)
-
- var existPrm shard.ExistsPrm
- existPrm.Address = addr
-
- exists, err := sh.Exists(ctx, existPrm)
- if err != nil {
- if shard.IsErrObjectExpired(err) {
- // object is already found but
- // expired => do nothing with it
- res.status = putToShardExists
- } else {
- e.log.Warn(logs.EngineCouldNotCheckObjectExistence,
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- }
-
- return // this is not ErrAlreadyRemoved error so we can go to the next shard
- }
-
- if exists.Exists() {
+ exists, err := sh.Exists(ctx, existPrm)
+ if err != nil {
+ if shard.IsErrObjectExpired(err) {
+ // object is already found but
+ // expired => do nothing with it
res.status = putToShardExists
- return
+ } else {
+ e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.Error(err))
}
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
-
- _, err = sh.Put(ctx, putPrm)
- if err != nil {
- if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
- errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
- e.log.Warn(logs.EngineCouldNotPutObjectToShard,
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- return
- }
- if client.IsErrObjectAlreadyRemoved(err) {
- e.log.Warn(logs.EngineCouldNotPutObjectToShard,
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- res.status = putToShardRemoved
- res.err = err
- return
- }
-
- e.reportShardError(sh, "could not put object to shard", err, zap.Stringer("address", addr))
- return
- }
-
- res.status = putToShardSuccess
- }); err != nil {
- e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err))
- close(exitCh)
+ return // this is not ErrAlreadyRemoved error so we can go to the next shard
}
- <-exitCh
+ if exists.Exists() {
+ res.status = putToShardExists
+ return
+ }
+
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ putPrm.SetIndexAttributes(isIndexedContainer)
+
+ _, err = sh.Put(ctx, putPrm)
+ if err != nil {
+ if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
+ errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
+ e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.Error(err))
+ return
+ }
+ if client.IsErrObjectAlreadyRemoved(err) {
+ e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.Error(err))
+ res.status = putToShardRemoved
+ res.err = err
+ return
+ }
+
+ e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr))
+ return
+ }
+
+ res.status = putToShardSuccess
return
}
// Put writes provided object to local storage.
-func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
- var putPrm PutPrm
- putPrm.WithObject(obj)
-
- return storage.Put(ctx, putPrm)
+func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error {
+ return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer})
}
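
The worker-pool indirection is gone from the put path: putToShard now runs synchronously on the caller's goroutine, and PutPrm is a plain struct. A minimal sketch of the resulting call shape, assuming an already-opened engine (storeObject is an illustrative name, not part of the change):

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// storeObject stores obj synchronously: with the worker pool gone, Put blocks
// until some shard accepts the object or every shard has been tried.
func storeObject(ctx context.Context, e *engine.StorageEngine, obj *objectSDK.Object) error {
	// Callers now fill the PutPrm fields directly instead of using setters.
	return e.Put(ctx, engine.PutPrm{Object: obj, IsIndexedContainer: false})
}
```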
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index cbf26ff4e..7ec4742d8 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -65,6 +64,15 @@ func (r RngRes) Object() *objectSDK.Object {
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.String("offset", strconv.FormatUint(prm.off, 10)),
+ attribute.String("length", strconv.FormatUint(prm.ln, 10)),
+ ))
+ defer span.End()
+ defer elapsed("GetRange", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
res, err = e.getRange(ctx, prm)
return err
@@ -74,18 +82,6 @@ func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, e
}
func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
- trace.WithAttributes(
- attribute.String("address", prm.addr.EncodeToString()),
- attribute.String("offset", strconv.FormatUint(prm.off, 10)),
- attribute.String("length", strconv.FormatUint(prm.ln, 10)),
- ))
- defer span.End()
-
- if e.metrics != nil {
- defer elapsed("GetRange", e.metrics.AddMethodDuration)()
- }
-
var shPrm shard.RngPrm
shPrm.SetAddress(prm.addr)
shPrm.SetRange(prm.off, prm.ln)
@@ -97,7 +93,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
Engine: e,
}
- it.tryGetWithMeta(ctx)
+ if err := it.tryGetWithMeta(ctx); err != nil {
+ return RngRes{}, err
+ }
if it.SplitInfo != nil {
return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
@@ -113,17 +111,18 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
return RngRes{}, it.OutError
}
- it.tryGetFromBlobstor(ctx)
+ if err := it.tryGetFromBlobstor(ctx); err != nil {
+ return RngRes{}, err
+ }
if it.Object == nil {
return RngRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
- e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
+ e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
- zap.String("error", it.MetaError.Error()),
- zap.Stringer("address", prm.addr),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(it.MetaError),
+ zap.Stringer("address", prm.addr))
}
}
@@ -162,8 +161,8 @@ type getRangeShardIterator struct {
Engine *StorageEngine
}
-func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) {
- i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error {
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.HasDegraded = i.HasDegraded || noMeta
i.ShardPrm.SetIgnoreMeta(noMeta)
@@ -208,19 +207,19 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) {
return true // stop, return it back
default:
- i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
return false
}
})
}
-func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) {
+func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
i.ShardPrm.SetIgnoreMeta(true)
- i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
if sh.GetMode().NoMetabase() {
// Already processed it without a metabase.
return false
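
The hunks above move the tracing span and the duration metric from the unexported getRange into the exported GetRange, and the shard iterators now propagate errors instead of swallowing them. A self-contained sketch of that convention; elapsed here is a local stand-in for the engine's helper, not the real one:

```go
package example

import (
	"context"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// elapsed mimics the engine helper: it records the start time and returns a
// closure that reports the duration when deferred.
func elapsed(method string, report func(string, time.Duration)) func() {
	start := time.Now()
	return func() { report(method, time.Since(start)) }
}

// Get shows the convention adopted above: the exported entry point owns the
// span and the method-duration metric, while the unexported body only does
// the work and propagates errors.
func Get(ctx context.Context, addr string, report func(string, time.Duration)) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "Example.Get",
		trace.WithAttributes(
			attribute.String("address", addr),
		))
	defer span.End()
	defer elapsed("Get", report)()

	return get(ctx)
}

func get(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // cancellation surfaces as an error, not a silent stop
	default:
		return nil
	}
}
```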
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
index 83c6a54ed..a29dd7ed9 100644
--- a/pkg/local_object_storage/engine/rebuild.go
+++ b/pkg/local_object_storage/engine/rebuild.go
@@ -4,6 +4,7 @@ import (
"context"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
@@ -41,7 +42,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
resGuard := &sync.Mutex{}
- limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit)
+ concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)}
eg, egCtx := errgroup.WithContext(ctx)
for _, shardID := range prm.ShardIDs {
@@ -61,7 +62,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{
- ConcurrencyLimiter: limiter,
+ ConcurrencyLimiter: concLimiter,
TargetFillPercent: prm.TargetFillPercent,
})
@@ -88,3 +89,20 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
return res, nil
}
+
+type concurrencyLimiter struct {
+ semaphore chan struct{}
+}
+
+func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
+ select {
+ case l.semaphore <- struct{}{}:
+ return l.releaseWorkSlot, nil
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+func (l *concurrencyLimiter) releaseWorkSlot() {
+ <-l.semaphore
+}
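
The rebuild limiter above is a plain channel semaphore: a buffered send takes a work slot, receiving releases it, and a cancelled context aborts the wait. A self-contained sketch of the same pattern; ReleaseFunc is a local stand-in for common.ReleaseFunc:

```go
package main

import (
	"context"
	"fmt"
)

// ReleaseFunc matches the shape of the release callback used above.
type ReleaseFunc func()

type concurrencyLimiter struct {
	semaphore chan struct{}
}

func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) {
	select {
	case l.semaphore <- struct{}{}: // a buffered send models taking a slot
		return func() { <-l.semaphore }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	limiter := &concurrencyLimiter{semaphore: make(chan struct{}, 2)}

	release, err := limiter.AcquireWorkSlot(context.Background())
	if err != nil {
		fmt.Println("acquire failed:", err)
		return
	}
	defer release() // frees the slot for the next worker
	fmt.Println("slot acquired; at most 2 workers run concurrently")
}
```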
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index 5e1ced56a..8ab3c5217 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -43,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
prm.Concurrency = defaultRemoveDuplicatesConcurrency
}
- e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
+ e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies,
zap.Int("concurrency", prm.Concurrency))
// The mutex must be held for the whole duration to avoid the target shard being removed
@@ -55,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
// This is not currently the case, because the `FreeSpace` metric used by weight sorting is always 0.
// However, we could change the weights in the future and easily forget this function.
for _, sh := range e.shards {
- e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
+ e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
ch := make(chan oid.Address)
errG, ctx := errgroup.WithContext(ctx)
@@ -93,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
})
}
if err := errG.Wait(); err != nil {
- e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
+ e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
return err
}
}
- e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
+ e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies)
return nil
}
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 6a8c9fab9..4243a5481 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -14,8 +14,9 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ indexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -24,8 +25,9 @@ type SelectRes struct {
}
// WithContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) WithContainerID(cnr cid.ID) {
+func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) {
p.cnr = cnr
+ p.indexedContainer = indexedContainer
}
// WithFilters is a Select option to set the object filters.
@@ -49,33 +51,29 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
+ defer elapsed("Select", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
- res, err = e._select(ctx, prm)
- return err
+ var sErr error
+ res, sErr = e._select(ctx, prm)
+ return sErr
})
return
}
func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed("Search", e.metrics.AddMethodDuration)()
- }
-
addrList := make([]oid.Address, 0)
uniqueMap := make(map[string]struct{})
- var outError error
-
var shPrm shard.SelectPrm
- shPrm.SetContainerID(prm.cnr)
+ shPrm.SetContainerID(prm.cnr, prm.indexedContainer)
shPrm.SetFilters(prm.filters)
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
res, err := sh.Select(ctx, shPrm)
if err != nil {
- e.reportShardError(sh, "could not select objects from shard", err)
+ e.reportShardError(ctx, sh, "could not select objects from shard", err)
return false
}
@@ -87,11 +85,13 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
}
return false
- })
+ }); err != nil {
+ return SelectRes{}, err
+ }
return SelectRes{
addrList: addrList,
- }, outError
+ }, nil
}
// List returns `limit` available physically stored object addresses in the engine.
@@ -99,28 +99,26 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) {
+ defer elapsed("List", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
- res, err = e.list(ctx, limit)
- return err
+ var lErr error
+ res, lErr = e.list(ctx, limit)
+ return lErr
})
return
}
func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed("ListObjects", e.metrics.AddMethodDuration)()
- }
-
addrList := make([]oid.Address, 0, limit)
uniqueMap := make(map[string]struct{})
ln := uint64(0)
// consider iterating over shuffled shards
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
res, err := sh.List(ctx) // consider limit result of shard iterator
if err != nil {
- e.reportShardError(sh, "could not select objects from shard", err)
+ e.reportShardError(ctx, sh, "could not select objects from shard", err)
} else {
for _, addr := range res.AddressList() { // save only unique values
if _, ok := uniqueMap[addr.EncodeToString()]; !ok {
@@ -136,7 +134,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
return false
- })
+ }); err != nil {
+ return SelectRes{}, err
+ }
return SelectRes{
addrList: addrList,
@@ -144,9 +144,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
// Select selects objects from local storage using provided filters.
-func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
- selectPrm.WithContainerID(cnr)
+ selectPrm.WithContainerID(cnr, isIndexedContainer)
selectPrm.WithFilters(fs)
res, err := storage.Select(ctx, selectPrm)
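
With the indexed-container flag threaded through SelectPrm down to the shards, a caller of the package-level Select helper now looks roughly as follows; the attribute name and value are illustrative only:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// findByAttribute sketches the new call shape: the indexed-container flag now
// travels with the container ID so shards can use attribute indexes.
func findByAttribute(ctx context.Context, e *engine.StorageEngine, cnr cid.ID, indexed bool) ([]oid.Address, error) {
	var fs objectSDK.SearchFilters
	fs.AddFilter("FileName", "report.txt", objectSDK.MatchStringEqual)

	return engine.Select(ctx, e, cnr, indexed, fs)
}
```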
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 2ad6859e4..69067c500 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -11,10 +11,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/google/uuid"
- "github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
@@ -108,25 +110,23 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) {
func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) {
sh, err := e.createShard(ctx, opts)
if err != nil {
- return nil, fmt.Errorf("could not create a shard: %w", err)
+ return nil, fmt.Errorf("create a shard: %w", err)
}
err = e.addShard(sh)
if err != nil {
- return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
+ return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
}
- if e.cfg.metrics != nil {
- e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
- }
+ e.metrics.SetMode(sh.ID().String(), sh.GetMode())
return sh.ID(), nil
}
-func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*shard.Shard, error) {
+func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) {
id, err := generateShardID()
if err != nil {
- return nil, fmt.Errorf("could not generate shard ID: %w", err)
+ return nil, fmt.Errorf("generate shard ID: %w", err)
}
opts = e.appendMetrics(id, opts)
@@ -136,13 +136,13 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh
shard.WithExpiredTombstonesCallback(e.processExpiredTombstones),
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
- shard.WithReportErrorFunc(e.reportShardErrorBackground),
+ shard.WithReportErrorFunc(e.reportShardErrorByID),
shard.WithZeroSizeCallback(e.processZeroSizeContainers),
shard.WithZeroCountCallback(e.processZeroCountContainers),
)...)
- if err := sh.UpdateID(); err != nil {
- e.log.Warn(logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
+ if err := sh.UpdateID(ctx); err != nil {
+ e.log.Warn(ctx, logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
}
return sh, nil
@@ -152,28 +152,26 @@ func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard
e.mtx.RLock()
defer e.mtx.RUnlock()
- if e.metrics != nil {
- opts = append(opts,
- shard.WithMetricsWriter(
- &metricsWithID{
- id: id.String(),
- mw: e.metrics,
- },
- ),
- shard.WithWriteCacheMetrics(
- &writeCacheMetrics{
- shardID: id.String(),
- metrics: e.metrics.WriteCache(),
- },
- ),
- shard.WithGCMetrics(
- &gcMetrics{
- storage: e.metrics.GC(),
- shardID: id.String(),
- },
- ),
- )
- }
+ opts = append(opts,
+ shard.WithMetricsWriter(
+ &metricsWithID{
+ id: id.String(),
+ mw: e.metrics,
+ },
+ ),
+ shard.WithWriteCacheMetrics(
+ &writeCacheMetrics{
+ shardID: id.String(),
+ metrics: e.metrics.WriteCache(),
+ },
+ ),
+ shard.WithGCMetrics(
+ &gcMetrics{
+ storage: e.metrics.GC(),
+ shardID: id.String(),
+ },
+ ),
+ )
return opts
}
@@ -182,11 +180,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
e.mtx.Lock()
defer e.mtx.Unlock()
- pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
- if err != nil {
- return fmt.Errorf("could not create pool: %w", err)
- }
-
strID := sh.ID().String()
if _, ok := e.shards[strID]; ok {
return fmt.Errorf("shard with id %s was already added", strID)
@@ -200,14 +193,12 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
hash: hrw.StringHash(strID),
}
- e.shardPools[strID] = pool
-
return nil
}
// removeShards removes specified shards. Skips non-existent shards.
// Logs errors about shards that it could not Close after the removal.
-func (e *StorageEngine) removeShards(ids ...string) {
+func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) {
if len(ids) == 0 {
return
}
@@ -221,33 +212,27 @@ func (e *StorageEngine) removeShards(ids ...string) {
continue
}
- sh.DeleteShardMetrics()
+ e.metrics.DeleteShardMetrics(id)
ss = append(ss, sh)
delete(e.shards, id)
- pool, ok := e.shardPools[id]
- if ok {
- pool.Release()
- delete(e.shardPools, id)
- }
-
- e.log.Info(logs.EngineShardHasBeenRemoved,
+ e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
zap.String("id", id))
}
e.mtx.Unlock()
for _, sh := range ss {
- err := sh.SetMode(mode.Disabled)
+ err := sh.SetMode(ctx, mode.Disabled)
if err != nil {
- e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
+ e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
}
- err = sh.Close()
+ err = sh.Close(ctx)
if err != nil {
- e.log.Error(logs.EngineCouldNotCloseRemovedShard,
+ e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
@@ -276,7 +261,7 @@ func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string })
h := hrw.StringHash(objAddr.EncodeToString())
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
- shards = append(shards, hashedShard(sh))
+ shards = append(shards, sh)
}
hrw.SortHasherSliceByValue(shards, h)
return shards
@@ -289,32 +274,44 @@ func (e *StorageEngine) unsortedShards() []hashedShard {
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
- shards = append(shards, hashedShard(sh))
+ shards = append(shards, sh)
}
return shards
}
-func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) {
+func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error {
for i, sh := range e.sortShards(addr) {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
if handler(i, sh) {
break
}
}
+ return nil
}
-func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) {
+func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error {
for _, sh := range e.unsortedShards() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
if handler(sh) {
break
}
}
+ return nil
}
// SetShardMode sets mode of the shard with provided identifier.
//
// Returns an error if shard mode was not set, or shard was not found in storage engine.
-func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
+func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -322,9 +319,9 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte
if id.String() == shID {
if resetErrorCounter {
sh.errorCount.Store(0)
- sh.Shard.ClearErrorCounter()
+ e.metrics.ClearErrorCounter(shID)
}
- return sh.SetMode(m)
+ return sh.SetMode(ctx, m)
}
}
@@ -333,8 +330,6 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte
// HandleNewEpoch notifies every shard about NewEpoch event.
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
- ev := shard.EventNewEpoch(epoch)
-
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -342,54 +337,54 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
select {
case <-ctx.Done():
return
- case sh.NotificationChannel() <- ev:
+ case sh.NotificationChannel() <- epoch:
default:
- e.log.Debug(logs.ShardEventProcessingInProgress,
+ e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))
}
}
}
-func (e *StorageEngine) DetachShards(ids []*shard.ID) error {
+func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error {
if len(ids) == 0 {
return logicerr.New("ids must be non-empty")
}
- deletedShards, err := e.deleteShards(ids)
+ deletedShards, err := e.deleteShards(ctx, ids)
if err != nil {
return err
}
- return e.closeShards(deletedShards)
+ return e.closeShards(ctx, deletedShards)
}
// closeShards closes deleted shards. Tries to close all shards.
// Returns a single error joining the individual shard errors.
-func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
+func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedShard) error {
var multiErr error
var multiErrGuard sync.Mutex
var eg errgroup.Group
for _, sh := range deletedShards {
eg.Go(func() error {
- err := sh.SetMode(mode.Disabled)
+ err := sh.SetMode(ctx, mode.Disabled)
if err != nil {
- e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
+ e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
multiErrGuard.Lock()
- multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err))
+ multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err))
multiErrGuard.Unlock()
}
- err = sh.Close()
+ err = sh.Close(ctx)
if err != nil {
- e.log.Error(logs.EngineCouldNotCloseRemovedShard,
+ e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
multiErrGuard.Lock()
- multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err))
+ multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err))
multiErrGuard.Unlock()
}
return nil
@@ -404,7 +399,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
// deleteShards deletes shards with the specified ids from the engine shard list
// and releases all engine resources associated with those shards.
// Returns the deleted shards, or an error if some shard could not be deleted.
-func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
+func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) {
ss := make([]hashedShard, 0, len(ids))
e.mtx.Lock()
@@ -426,17 +421,11 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
for _, sh := range ss {
idStr := sh.ID().String()
- sh.DeleteShardMetrics()
+ e.metrics.DeleteShardMetrics(idStr)
delete(e.shards, idStr)
- pool, ok := e.shardPools[idStr]
- if ok {
- pool.Release()
- delete(e.shardPools, idStr)
- }
-
- e.log.Info(logs.EngineShardHasBeenRemoved,
+ e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
zap.String("id", idStr))
}
@@ -446,3 +435,48 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
func (s hashedShard) Hash() uint64 {
return s.hash
}
+
+func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) {
+ var err error
+ var info []shard.Info
+ prm := shard.ExistsPrm{
+ Address: obj,
+ }
+ var siErr *objectSDK.SplitInfoError
+ var ecErr *objectSDK.ECInfoError
+
+ if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
+ res, exErr := hs.Exists(ctx, prm)
+ if exErr != nil {
+ if client.IsErrObjectAlreadyRemoved(exErr) {
+ err = new(apistatus.ObjectAlreadyRemoved)
+ return true
+ }
+
+ // Check if error is either SplitInfoError or ECInfoError.
+ // True means the object is virtual.
+ if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) {
+ info = append(info, hs.DumpInfo())
+ return false
+ }
+
+ if shard.IsErrObjectExpired(exErr) {
+ err = exErr
+ return true
+ }
+
+ if !client.IsErrObjectNotFound(exErr) {
+ e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address))
+ }
+
+ return false
+ }
+ if res.Exists() {
+ info = append(info, hs.DumpInfo())
+ }
+ return false
+ }); itErr != nil {
+ return nil, itErr
+ }
+ return info, err
+}
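
Both shard iterators now poll the context between shards, so a cancelled request stops early instead of walking the whole shard list, while the handler's stop flag still short-circuits as before. A generic sketch of that loop shape:

```go
package example

import (
	"context"
	"errors"
)

// forEach mirrors iterateOverUnsortedShards above: the loop checks ctx between
// elements so cancellation is observed promptly, and the handler can still
// stop the iteration early without producing an error.
func forEach[T any](ctx context.Context, items []T, handler func(T) (stop bool)) error {
	for _, it := range items {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if handler(it) {
			break
		}
	}
	return nil
}

func example() error {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // already cancelled: the iteration returns context.Canceled

	err := forEach(ctx, []int{1, 2, 3}, func(int) bool { return false })
	if !errors.Is(err, context.Canceled) {
		return errors.New("unexpected result")
	}
	return nil
}
```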
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index 3347d58f1..3aa9629b0 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -13,11 +13,10 @@ import (
func TestRemoveShard(t *testing.T) {
const numOfShards = 6
- te := testNewEngine(t).setShardsNum(t, numOfShards)
+ te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
e, ids := te.engine, te.shardIDs
defer func() { require.NoError(t, e.Close(context.Background())) }()
- require.Equal(t, numOfShards, len(e.shardPools))
require.Equal(t, numOfShards, len(e.shards))
removedNum := numOfShards / 2
@@ -33,11 +32,10 @@ func TestRemoveShard(t *testing.T) {
for id, remove := range mSh {
if remove {
- e.removeShards(id)
+ e.removeShards(context.Background(), id)
}
}
- require.Equal(t, numOfShards-removedNum, len(e.shardPools))
require.Equal(t, numOfShards-removedNum, len(e.shards))
for id, removed := range mSh {
@@ -51,15 +49,15 @@ func TestDisableShards(t *testing.T) {
const numOfShards = 2
- te := testNewEngine(t).setShardsNum(t, numOfShards)
+ te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
e, ids := te.engine, te.shardIDs
defer func() { require.NoError(t, e.Close(context.Background())) }()
- require.ErrorAs(t, e.DetachShards(ids), new(logicerr.Logical))
- require.ErrorAs(t, e.DetachShards(nil), new(logicerr.Logical))
- require.ErrorAs(t, e.DetachShards([]*shard.ID{}), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical))
- require.NoError(t, e.DetachShards([]*shard.ID{ids[0]}))
+ require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]}))
require.Equal(t, 1, len(e.shards))
}
diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go
index 39122628f..cfd15b4d4 100644
--- a/pkg/local_object_storage/engine/tree.go
+++ b/pkg/local_object_storage/engine/tree.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.opentelemetry.io/otel/attribute"
@@ -37,10 +36,9 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor,
lm, err := lst[index].TreeMove(ctx, d, treeID, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't perform `TreeMove`", err,
+ e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err,
zap.Stringer("cid", d.CID),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
return nil, err
@@ -71,10 +69,9 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip
lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err,
+ e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err,
zap.Stringer("cid", d.CID),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
return nil, err
}
@@ -100,10 +97,36 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str
err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't perform `TreeApply`", err,
+ e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err,
zap.Stringer("cid", cnr),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
+ }
+ return err
+ }
+ return nil
+}
+
+// TreeApplyBatch implements the pilorama.Forest interface.
+func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, cnr, treeID)
+ if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
+ return err
+ }
+
+ err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m)
+ if err != nil {
+ if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
+ e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err,
+ zap.Stringer("cid", cnr),
+ zap.String("tree", treeID))
}
return err
}
@@ -132,10 +155,9 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetByPath`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
continue
}
@@ -165,10 +187,9 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetMeta`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
continue
}
@@ -197,10 +218,9 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetChildren`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
continue
}
@@ -210,7 +230,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
}
// TreeSortedByFilename implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) {
+func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
@@ -221,7 +241,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID,
var err error
var nodes []pilorama.MultiNodeInfo
- var cursor *string
+ var cursor *pilorama.Cursor
for _, sh := range e.sortShards(cid) {
nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
if err != nil {
@@ -229,10 +249,9 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID,
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeSortedByFilename`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
continue
}
@@ -261,10 +280,9 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetOpLog`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
continue
}
@@ -291,10 +309,9 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) {
- e.reportShardError(sh, "can't perform `TreeDrop`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
continue
}
@@ -321,9 +338,8 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string,
return nil, err
}
- e.reportShardError(sh, "can't perform `TreeList`", err,
- zap.Stringer("cid", cid),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ e.reportShardError(ctx, sh, "can't perform `TreeList`", err,
+ zap.Stringer("cid", cid))
// returns as much info about
// trees as possible
@@ -387,10 +403,9 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK
err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't update tree synchronization height", err,
+ e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
return err
}
@@ -414,10 +429,9 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't read tree synchronization height", err,
+ e.reportShardError(ctx, sh, "can't read tree synchronization height", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("tree", treeID))
}
continue
}
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 6f694f082..ea0a9e74e 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -37,7 +37,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
for i := range objCount {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
- err := Put(context.Background(), te.ng, obj)
+ err := Put(context.Background(), te.ng, obj, false)
if err != nil {
b.Fatal(err)
}
@@ -50,7 +50,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
b.Run("search", func(b *testing.B) {
var prm SelectPrm
- prm.WithContainerID(cid)
+ prm.WithContainerID(cid, true)
var fs objectSDK.SearchFilters
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
diff --git a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go
index 23740868d..6b101fa60 100644
--- a/pkg/local_object_storage/internal/log/log.go
+++ b/pkg/local_object_storage/internal/log/log.go
@@ -1,14 +1,16 @@
package storagelog
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
// Write writes a message about a storage engine operation to the logger.
-func Write(logger *logger.Logger, fields ...zap.Field) {
- logger.Debug(logs.StorageOperation, fields...)
+func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) {
+ logger.Debug(ctx, logs.StorageOperation, fields...)
}
// AddressField returns logger's field for object address.
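
With the context threaded through Write, call sites no longer append trace IDs by hand; the logger can attach request-scoped data itself. A sketch of a call site as it might look inside the repository (the op field is illustrative):

```go
package example

import (
	"context"

	storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

// logOp passes the request context along with the structured fields, so the
// logger implementation can derive trace information from ctx on its own.
func logOp(ctx context.Context, l *logger.Logger, op string) {
	storagelog.Write(ctx, l, zap.String("op", op))
}
```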
diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go
index 586b3dcc6..d46365296 100644
--- a/pkg/local_object_storage/internal/storagetest/storage.go
+++ b/pkg/local_object_storage/internal/storagetest/storage.go
@@ -11,9 +11,9 @@ import (
// Component represents single storage component.
type Component interface {
Open(context.Context, mode.Mode) error
- SetMode(mode.Mode) error
- Init() error
- Close() error
+ SetMode(context.Context, mode.Mode) error
+ Init(context.Context) error
+ Close(context.Context) error
}
// Constructor constructs storage component.
@@ -59,18 +59,18 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) {
// Use-case: irrecoverable error on some components, close everything.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Close(context.Background()))
})
t.Run("RO", func(t *testing.T) {
// Use-case: irrecoverable error on some components, close everything.
// Open in read-only must be done after the db is here.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init())
- require.NoError(t, s.Close())
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.Close(context.Background()))
require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Close(context.Background()))
})
}
@@ -79,9 +79,9 @@ func TestCloseTwice(t *testing.T, cons Constructor) {
// Use-case: move to maintenance mode twice, first time failed.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init())
- require.NoError(t, s.Close())
- require.NoError(t, s.Close()) // already closed, no-op
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close(context.Background())) // already closed, no-op
}
// TestSetMode checks that any mode transition can be done safely.
@@ -91,23 +91,23 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) {
// call `SetMode` on all not-yet-initialized components.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.SetMode(m))
+ require.NoError(t, s.SetMode(context.Background(), m))
t.Run("after open in RO", func(t *testing.T) {
- require.NoError(t, s.Close())
+ require.NoError(t, s.Close(context.Background()))
require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
- require.NoError(t, s.SetMode(m))
+ require.NoError(t, s.SetMode(context.Background(), m))
})
- require.NoError(t, s.Close())
+ require.NoError(t, s.Close(context.Background()))
})
t.Run("after init", func(t *testing.T) {
s := cons(t)
// Use-case: normal node operation.
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init())
- require.NoError(t, s.SetMode(m))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.SetMode(context.Background(), m))
+ require.NoError(t, s.Close(context.Background()))
})
}
@@ -115,8 +115,8 @@ func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) {
// Use-case: normal node operation.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init())
- require.NoError(t, s.SetMode(from))
- require.NoError(t, s.SetMode(to))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.SetMode(context.Background(), from))
+ require.NoError(t, s.SetMode(context.Background(), to))
+ require.NoError(t, s.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go
index 383c596af..52b199b0b 100644
--- a/pkg/local_object_storage/internal/testutil/generators.go
+++ b/pkg/local_object_storage/internal/testutil/generators.go
@@ -1,7 +1,9 @@
package testutil
import (
+ cryptorand "crypto/rand"
"encoding/binary"
+ "math/rand"
"sync/atomic"
"testing"
@@ -9,7 +11,6 @@ import (
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/rand"
)
// AddressGenerator is the interface of types that generate object addresses.
@@ -61,7 +62,7 @@ var _ ObjectGenerator = &SeqObjGenerator{}
func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object {
data := make([]byte, sz)
- _, _ = rand.Read(data)
+ _, _ = cryptorand.Read(data)
obj := GenerateObjectWithCIDWithPayload(cid, data)
obj.SetID(oid)
return obj
@@ -82,7 +83,7 @@ var _ ObjectGenerator = &RandObjGenerator{}
func (g *RandObjGenerator) Next() *objectSDK.Object {
var id oid.ID
- _, _ = rand.Read(id[:])
+ _, _ = cryptorand.Read(id[:])
return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
}
diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go
index 60e9211d5..1087e40be 100644
--- a/pkg/local_object_storage/internal/testutil/object.go
+++ b/pkg/local_object_storage/internal/testutil/object.go
@@ -1,6 +1,7 @@
package testutil
import (
+ "crypto/rand"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@@ -11,7 +12,6 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
- "golang.org/x/exp/rand"
)
const defaultDataSize = 32
diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go
new file mode 100644
index 000000000..de1479e6f
--- /dev/null
+++ b/pkg/local_object_storage/metabase/bucket_cache.go
@@ -0,0 +1,82 @@
+package meta
+
+import (
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "go.etcd.io/bbolt"
+)
+
+type bucketCache struct {
+ locked *bbolt.Bucket
+ graveyard *bbolt.Bucket
+ garbage *bbolt.Bucket
+ expired map[cid.ID]*bbolt.Bucket
+ primary map[cid.ID]*bbolt.Bucket
+}
+
+func newBucketCache() *bucketCache {
+ return &bucketCache{}
+}
+
+func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
+ if bc == nil {
+ return tx.Bucket(bucketNameLocked)
+ }
+ return getBucket(&bc.locked, tx, bucketNameLocked)
+}
+
+func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
+ if bc == nil {
+ return tx.Bucket(graveyardBucketName)
+ }
+ return getBucket(&bc.graveyard, tx, graveyardBucketName)
+}
+
+func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
+ if bc == nil {
+ return tx.Bucket(garbageBucketName)
+ }
+ return getBucket(&bc.garbage, tx, garbageBucketName)
+}
+
+func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket {
+ if *cache != nil {
+ return *cache
+ }
+
+ *cache = tx.Bucket(name)
+ return *cache
+}
+
+func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
+ if bc == nil {
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = objectToExpirationEpochBucketName(cnr, bucketName)
+ return tx.Bucket(bucketName)
+ }
+ return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr)
+}
+
+func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
+ if bc == nil {
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = primaryBucketName(cnr, bucketName)
+ return tx.Bucket(bucketName)
+ }
+ return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr)
+}
+
+func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
+ value, ok := (*m)[cnr]
+ if ok {
+ return value
+ }
+
+ if *m == nil {
+ *m = make(map[cid.ID]*bbolt.Bucket, 1)
+ }
+
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = nameFunc(cnr, bucketName)
+ (*m)[cnr] = getBucket(&value, tx, bucketName)
+ return value
+}
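
The cache is meant to live for exactly one bbolt transaction: helpers accept an optional *bucketCache, and a nil cache transparently falls back to direct tx.Bucket lookups. A sketch of the intended use, written as if in the same meta package (readGraveyard and db are illustrative):

```go
package meta

import (
	"go.etcd.io/bbolt"
)

// readGraveyard creates one bucketCache per read transaction and shares it
// across every lookup, so only the first call pays for tx.Bucket resolution.
func readGraveyard(db *bbolt.DB, keys [][]byte) error {
	return db.View(func(tx *bbolt.Tx) error {
		bc := newBucketCache() // one cache per transaction
		for _, k := range keys {
			// The first call resolves tx.Bucket once; later calls hit bc.graveyard.
			if b := getGraveyardBucket(bc, tx); b != nil {
				_ = b.Get(k)
			}
		}
		return nil
	})
}
```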
diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go
index 472b2affc..da27e6085 100644
--- a/pkg/local_object_storage/metabase/containers.go
+++ b/pkg/local_object_storage/metabase/containers.go
@@ -56,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) {
return result, err
}
-func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) {
+func (db *DB) ContainerSize(id cid.ID) (uint64, error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -64,21 +64,22 @@ func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) {
return 0, ErrDegradedMode
}
- err = db.boltDB.View(func(tx *bbolt.Tx) error {
- size, err = db.containerSize(tx, id)
+ var size uint64
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ size = db.containerSize(tx, id)
- return err
+ return nil
})
return size, metaerr.Wrap(err)
}
-func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) {
+func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 {
containerVolume := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
id.Encode(key)
- return parseContainerSize(containerVolume.Get(key)), nil
+ return parseContainerSize(containerVolume.Get(key))
}
func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool {
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 8b1874458..8d8d91dc7 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -18,7 +18,7 @@ func TestDB_Containers(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const N = 10
@@ -67,7 +67,7 @@ func TestDB_Containers(t *testing.T) {
assertContains(cnrs, cnr)
- require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address()))
+ require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID()))
cnrs, err = db.Containers(context.Background())
require.NoError(t, err)
@@ -79,7 +79,7 @@ func TestDB_ContainersCount(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const R, T, SG, L = 10, 11, 12, 13 // number of objects per type
@@ -116,7 +116,7 @@ func TestDB_ContainerSize(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const (
C = 3
@@ -164,7 +164,7 @@ func TestDB_ContainerSize(t *testing.T) {
require.NoError(t, metaInhume(
db,
object.AddressOf(obj),
- oidtest.Address(),
+ oidtest.ID(),
))
volume -= int(obj.PayloadSize())
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index d6546d922..c19c65224 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -39,7 +39,7 @@ var (
)
// Open boltDB instance for metabase.
-func (db *DB) Open(_ context.Context, m mode.Mode) error {
+func (db *DB) Open(ctx context.Context, m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = m
@@ -48,16 +48,16 @@ func (db *DB) Open(_ context.Context, m mode.Mode) error {
if m.NoMetabase() {
return nil
}
- return db.openDB(m)
+ return db.openDB(ctx, m)
}
-func (db *DB) openDB(mode mode.Mode) error {
+func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
if err != nil {
- return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
+ return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err)
}
- db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
+ db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
if db.boltOptions == nil {
opts := *bbolt.DefaultOptions
@@ -65,22 +65,22 @@ func (db *DB) openDB(mode mode.Mode) error {
}
db.boltOptions.ReadOnly = mode.ReadOnly()
- return metaerr.Wrap(db.openBolt())
+ return metaerr.Wrap(db.openBolt(ctx))
}
-func (db *DB) openBolt() error {
+func (db *DB) openBolt(ctx context.Context) error {
var err error
db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions)
if err != nil {
- return fmt.Errorf("can't open boltDB database: %w", err)
+ return fmt.Errorf("open boltDB database: %w", err)
}
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize
- db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)
+ db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase)
- db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
+ db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion)
return db.boltDB.View(func(tx *bbolt.Tx) error {
// The safest way to check if the metabase is fresh is to check if it has no buckets.
// However, shard info can be present. So here we check that the number of buckets is
@@ -109,7 +109,7 @@ func (db *DB) openBolt() error {
//
// Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state,
// use Reset.
-func (db *DB) Init() error {
+func (db *DB) Init(_ context.Context) error {
return metaerr.Wrap(db.init(false))
}
@@ -145,27 +145,27 @@ func (db *DB) init(reset bool) error {
if reset {
err := tx.DeleteBucket(name)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("could not delete static bucket %s: %w", k, err)
+ return fmt.Errorf("delete static bucket %s: %w", k, err)
}
}
_, err := tx.CreateBucketIfNotExists(name)
if err != nil {
- return fmt.Errorf("could not create static bucket %s: %w", k, err)
+ return fmt.Errorf("create static bucket %s: %w", k, err)
}
}
for _, b := range deprecatedBuckets {
err := tx.DeleteBucket(b)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err)
+ return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
}
}
if !reset { // counters will be recalculated by refill metabase
err = syncCounter(tx, false)
if err != nil {
- return fmt.Errorf("could not sync object counter: %w", err)
+ return fmt.Errorf("sync object counter: %w", err)
}
return nil
@@ -205,7 +205,7 @@ func (db *DB) SyncCounters() error {
// Close closes boltDB instance
// and reports metabase metric.
-func (db *DB) Close() error {
+func (db *DB) Close(context.Context) error {
var err error
if db.boltDB != nil {
err = db.close()
@@ -226,7 +226,7 @@ func (db *DB) close() error {
// If there was a problem with applying new configuration, an error is returned.
//
// If the metabase could not be reopened because of an error, ErrDegradedMode is returned.
-func (db *DB) Reload(opts ...Option) (bool, error) {
+func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) {
var c cfg
for i := range opts {
opts[i](&c)
@@ -236,14 +236,14 @@ func (db *DB) Reload(opts ...Option) (bool, error) {
defer db.modeMtx.Unlock()
if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) {
- if err := db.Close(); err != nil {
+ if err := db.Close(ctx); err != nil {
return false, err
}
db.mode = mode.Disabled
db.metrics.SetMode(mode.ComponentDisabled)
db.info.Path = c.info.Path
- if err := db.openBolt(); err != nil {
+ if err := db.openBolt(ctx); err != nil {
return false, metaerr.Wrap(fmt.Errorf("%w: %v", ErrDegradedMode, err))
}
diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go
index 0354a5eb6..d26402675 100644
--- a/pkg/local_object_storage/metabase/control_test.go
+++ b/pkg/local_object_storage/metabase/control_test.go
@@ -15,7 +15,7 @@ import (
func TestReset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
err := db.Reset()
require.NoError(t, err)
@@ -41,7 +41,7 @@ func TestReset(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
- err = metaInhume(db, addrToInhume, oidtest.Address())
+ err = metaInhume(db, addrToInhume, oidtest.ID())
require.NoError(t, err)
assertExists(addr, true, nil)
diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go
index 275099ff2..732f99519 100644
--- a/pkg/local_object_storage/metabase/counter.go
+++ b/pkg/local_object_storage/metabase/counter.go
@@ -238,26 +238,26 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
}
if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
- return fmt.Errorf("could not increase phy object counter: %w", err)
+ return fmt.Errorf("increase phy object counter: %w", err)
}
if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
- return fmt.Errorf("could not increase logical object counter: %w", err)
+ return fmt.Errorf("increase logical object counter: %w", err)
}
if isUserObject {
if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
- return fmt.Errorf("could not increase user object counter: %w", err)
+ return fmt.Errorf("increase user object counter: %w", err)
}
}
return db.incContainerObjectCounter(tx, cnrID, isUserObject)
}
-func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool) error {
+func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error {
b := tx.Bucket(shardInfoBucket)
if b == nil {
return nil
}
- return db.updateShardObjectCounterBucket(b, typ, delta, inc)
+ return db.updateShardObjectCounterBucket(b, typ, delta, false)
}
func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error {
@@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject
func syncCounter(tx *bbolt.Tx, force bool) error {
shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket)
if err != nil {
- return fmt.Errorf("could not get shard info bucket: %w", err)
+ return fmt.Errorf("get shard info bucket: %w", err)
}
shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 &&
len(shardInfoB.Get(objectLogicCounterKey)) == 8 &&
@@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName)
if err != nil {
- return fmt.Errorf("could not get container counter bucket: %w", err)
+ return fmt.Errorf("get container counter bucket: %w", err)
}
var addr oid.Address
@@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
return nil
})
if err != nil {
- return fmt.Errorf("could not iterate objects: %w", err)
+ return fmt.Errorf("iterate objects: %w", err)
}
return setObjectCounters(counters, shardInfoB, containerCounterB)
@@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
value := containerCounterValue(count)
err := containerCounterB.Put(key, value)
if err != nil {
- return fmt.Errorf("could not update phy container object counter: %w", err)
+ return fmt.Errorf("update phy container object counter: %w", err)
}
}
phyData := make([]byte, 8)
@@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err := shardInfoB.Put(objectPhyCounterKey, phyData)
if err != nil {
- return fmt.Errorf("could not update phy object counter: %w", err)
+ return fmt.Errorf("update phy object counter: %w", err)
}
logData := make([]byte, 8)
@@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err = shardInfoB.Put(objectLogicCounterKey, logData)
if err != nil {
- return fmt.Errorf("could not update logic object counter: %w", err)
+ return fmt.Errorf("update logic object counter: %w", err)
}
userData := make([]byte, 8)
@@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err = shardInfoB.Put(objectUserCounterKey, userData)
if err != nil {
- return fmt.Errorf("could not update user object counter: %w", err)
+ return fmt.Errorf("update user object counter: %w", err)
}
return nil
@@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) {
}
var cnrID cid.ID
if err := cnrID.Decode(buf); err != nil {
- return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err)
+ return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
}
return cnrID, nil
}
@@ -654,7 +654,7 @@ func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
@@ -737,7 +737,7 @@ func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerCounterBucketName)
key := make([]byte, cidSize)
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index d1f808a63..950385a29 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -22,7 +22,7 @@ func TestCounters(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
c, err := db.ObjectCounters()
require.NoError(t, err)
require.Zero(t, c.Phy)
@@ -37,7 +37,7 @@ func TestCounters(t *testing.T) {
t.Run("put", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := make([]*objectSDK.Object, 0, objCount)
for range objCount {
oo = append(oo, testutil.GenerateObject())
@@ -75,7 +75,7 @@ func TestCounters(t *testing.T) {
t.Run("delete", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, false)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -120,7 +120,7 @@ func TestCounters(t *testing.T) {
t.Run("inhume", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, false)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -156,13 +156,18 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
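+ // inhume the objects one by one: a tombstone must reside in the same container as its target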
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- res, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, uint64(len(inhumedObjs)), res.LogicInhumed())
- require.Equal(t, uint64(len(inhumedObjs)), res.UserInhumed())
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
+
+ res, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), res.LogicInhumed())
+ require.Equal(t, uint64(1), res.UserInhumed())
+ }
c, err := db.ObjectCounters()
require.NoError(t, err)
@@ -180,7 +185,7 @@ func TestCounters(t *testing.T) {
t.Run("put_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
parObj := testutil.GenerateObject()
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -218,7 +223,7 @@ func TestCounters(t *testing.T) {
t.Run("delete_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, true)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -260,7 +265,7 @@ func TestCounters(t *testing.T) {
t.Run("inhume_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, true)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -296,11 +301,16 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
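+ // as above, each tombstone is scoped to its target's container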
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- _, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
+
+ _, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
c, err := db.ObjectCounters()
require.NoError(t, err)
@@ -319,7 +329,7 @@ func TestCounters(t *testing.T) {
func TestDoublePut(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
obj := testutil.GenerateObject()
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -377,7 +387,7 @@ func TestCounters_Expired(t *testing.T) {
es := &epochState{epoch}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := make([]oid.Address, objCount)
for i := range oo {
diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go
index 1f444a3ef..4474aa229 100644
--- a/pkg/local_object_storage/metabase/db.go
+++ b/pkg/local_object_storage/metabase/db.go
@@ -11,9 +11,9 @@ import (
"sync"
"time"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/mr-tron/base58"
"go.etcd.io/bbolt"
@@ -70,7 +70,7 @@ func defaultCfg() *cfg {
},
boltBatchDelay: bbolt.DefaultMaxBatchDelay,
boltBatchSize: bbolt.DefaultMaxBatchSize,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
metrics: &noopMetrics{},
}
}
diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go
index 01e1ed2bc..edaeb13c5 100644
--- a/pkg/local_object_storage/metabase/db_test.go
+++ b/pkg/local_object_storage/metabase/db_test.go
@@ -6,10 +6,10 @@ import (
"strconv"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -32,7 +32,17 @@ func putBig(db *meta.DB, obj *objectSDK.Object) error {
}
func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs)
+ res, err := metaSelect(db, cnr, fs, false)
+ require.NoError(t, err)
+ require.Len(t, res, len(exp))
+
+ for i := range exp {
+ require.Contains(t, res, exp[i])
+ }
+}
+
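+// testSelect2 is like testSelect but allows toggling attribute-index usage for the selection.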
+func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) {
+ res, err := metaSelect(db, cnr, fs, useAttrIndex)
require.NoError(t, err)
require.Len(t, res, len(exp))
@@ -51,7 +61,7 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB {
)
require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bdb.Init())
+ require.NoError(t, bdb.Init(context.Background()))
return bdb
}
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 00c8d06e0..9a5a6e574 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -77,8 +77,6 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
type referenceNumber struct {
all, cur int
- addr oid.Address
-
obj *objectSDK.Object
}
@@ -112,14 +110,14 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
var err error
var res DeleteRes
- err = db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
res, err = db.deleteGroup(tx, prm.addrs)
return err
})
if err == nil {
deleted = true
for i := range prm.addrs {
- storagelog.Write(db.log,
+ storagelog.Write(ctx, db.log,
storagelog.AddressField(prm.addrs[i]),
storagelog.OpField("metabase DELETE"))
}
@@ -163,28 +161,28 @@ func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error)
func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
if res.phyCount > 0 {
- err := db.updateShardObjectCounter(tx, phy, res.phyCount, false)
+ err := db.decShardObjectCounter(tx, phy, res.phyCount)
if err != nil {
- return fmt.Errorf("could not decrease phy object counter: %w", err)
+ return fmt.Errorf("decrease phy object counter: %w", err)
}
}
if res.logicCount > 0 {
- err := db.updateShardObjectCounter(tx, logical, res.logicCount, false)
+ err := db.decShardObjectCounter(tx, logical, res.logicCount)
if err != nil {
- return fmt.Errorf("could not decrease logical object counter: %w", err)
+ return fmt.Errorf("decrease logical object counter: %w", err)
}
}
if res.userCount > 0 {
- err := db.updateShardObjectCounter(tx, user, res.userCount, false)
+ err := db.decShardObjectCounter(tx, user, res.userCount)
if err != nil {
- return fmt.Errorf("could not decrease user object counter: %w", err)
+ return fmt.Errorf("decrease user object counter: %w", err)
}
}
if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
- return fmt.Errorf("could not decrease container object counter: %w", err)
+ return fmt.Errorf("decrease container object counter: %w", err)
}
return nil
}
@@ -261,7 +259,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
}
}
return deleteSingleResult{}, nil
@@ -282,7 +280,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
}
}
@@ -295,9 +293,8 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef, ok := refCounter[k]
if !ok {
nRef = &referenceNumber{
- all: parentLength(tx, parAddr),
- addr: parAddr,
- obj: parent,
+ all: parentLength(tx, parAddr),
+ obj: parent,
}
refCounter[k] = nRef
@@ -311,7 +308,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
// remove object
err = db.deleteObject(tx, obj, false)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
}
if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {
@@ -338,7 +335,12 @@ func (db *DB) deleteObject(
err = updateListIndexes(tx, obj, delListIndexItem)
if err != nil {
- return fmt.Errorf("can't remove list indexes: %w", err)
+ return fmt.Errorf("remove list indexes: %w", err)
+ }
+
+ err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("remove fake bucket tree indexes: %w", err)
}
if isParent {
@@ -349,7 +351,7 @@ func (db *DB) deleteObject(
addrKey := addressKey(object.AddressOf(obj), key)
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("could not remove from garbage bucket: %w", err)
+ return fmt.Errorf("remove from garbage bucket: %w", err)
}
}
}
@@ -361,12 +363,12 @@ func (db *DB) deleteObject(
func parentLength(tx *bbolt.Tx, addr oid.Address) int {
bucketName := make([]byte, bucketKeySize)
- bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:]))
+ bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName))
if bkt == nil {
return 0
}
- lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:])))
+ lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName)))
if err != nil {
return 0
}
@@ -374,11 +376,12 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int {
return len(lst)
}
-func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) {
+func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt := tx.Bucket(item.name)
if bkt != nil {
- _ = bkt.Delete(item.key) // ignore error, best effort there
+ return bkt.Delete(item.key)
}
+ return nil
}
func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
@@ -403,19 +406,56 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
// if list empty, remove the key from bucket
if len(lst) == 0 {
- _ = bkt.Delete(item.key) // ignore error, best effort there
-
- return nil
+ return bkt.Delete(item.key)
}
// if list is not empty, then update it
encodedLst, err := encodeList(lst)
if err != nil {
- return nil // ignore error, best effort there
+ return err
}
- _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there
- return nil
+ return bkt.Put(item.key, encodedLst)
+}
+
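+// delFKBTIndexItem removes a value from a fake bucket tree (FKBT) index and
+// prunes the enclosing buckets bottom-up once they become empty.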
+func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt := tx.Bucket(item.name)
+ if bkt == nil {
+ return nil
+ }
+
+ fkbtRoot := bkt.Bucket(item.key)
+ if fkbtRoot == nil {
+ return nil
+ }
+
+ if err := fkbtRoot.Delete(item.val); err != nil {
+ return err
+ }
+
+ if hasAnyItem(fkbtRoot) {
+ return nil
+ }
+
+ if err := bkt.DeleteBucket(item.key); err != nil {
+ return err
+ }
+
+ if hasAnyItem(bkt) {
+ return nil
+ }
+
+ return tx.DeleteBucket(item.name)
+}
+
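+// hasAnyItem reports whether the bucket contains at least one key.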
+func hasAnyItem(b *bbolt.Bucket) bool {
+ k, _ := b.Cursor().First()
+ return k != nil
}
func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error {
@@ -438,35 +478,47 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
return ErrUnknownObjectType
}
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
} else {
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: parentBucketName(cnr, bucketName),
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
}
- delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
+ if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
name: smallBucketName(cnr, bucketName),
key: objKey,
- })
- delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
+ }); err != nil {
+ return err
+ }
+ if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
name: rootBucketName(cnr, bucketName),
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
if expEpoch, ok := hasExpirationEpoch(obj); ok {
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: expEpochToObjectBucketName,
key: expirationEpochKey(expEpoch, cnr, addr.Object()),
- })
- delUniqueIndexItem(tx, namedBucketItem{
+ }); err != nil {
+ return err
+ }
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
}
return nil
@@ -487,16 +539,18 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
}
}
// also drop EC parent root info if current EC chunk is the last one
if !hasAnyChunks {
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
- })
+ }); err != nil {
+ return err
+ }
}
if ech.ParentSplitParentID() == nil {
@@ -525,16 +579,15 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
}
}
// drop split info
- delUniqueIndexItem(tx, namedBucketItem{
+ return delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)),
})
- return nil
}
func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool {
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
index 66c79ecd7..884da23ff 100644
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -30,8 +30,8 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close()) }()
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
@@ -39,7 +39,6 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj.SetPayloadSize(uint64(10))
@@ -131,17 +130,9 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.Equal(t, 2, len(tombstonedObjects))
- var tombstones []oid.Address
- for _, tss := range tombstonedObjects {
- tombstones = append(tombstones, tss.tomb)
- }
- inhumePrm.SetAddresses(tombstones...)
- inhumePrm.SetGCMark()
- _, err = db.Inhume(context.Background(), inhumePrm)
+ _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
require.NoError(t, err)
- require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
-
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
@@ -195,8 +186,8 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close()) }()
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
ecChunks := make([]oid.ID, chunksCount)
@@ -375,17 +366,9 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.True(t, len(tombstonedObjects) == parentCount+chunksCount)
- var tombstones []oid.Address
- for _, tss := range tombstonedObjects {
- tombstones = append(tombstones, tss.tomb)
- }
- inhumePrm.SetAddresses(tombstones...)
- inhumePrm.SetGCMark()
- _, err = db.Inhume(context.Background(), inhumePrm)
+ _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
require.NoError(t, err)
- require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
-
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
new file mode 100644
index 000000000..0329e3a73
--- /dev/null
+++ b/pkg/local_object_storage/metabase/delete_meta_test.go
@@ -0,0 +1,85 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "path/filepath"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/stretchr/testify/require"
+ "go.etcd.io/bbolt"
+)
+
+func TestPutDeleteIndexAttributes(t *testing.T) {
+ db := New([]Option{
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{}),
+ }...)
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object")
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj1)
+
+ _, err := db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
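+ // attribute buckets must be absent while attribute indexing is disabled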
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object")
+
+ putPrm.SetObject(obj2)
+ putPrm.SetIndexAttributes(true)
+
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
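+ // with SetIndexAttributes(true) both attribute buckets must contain the object key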
+ objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize))
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("CRDT-Name"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("/path/to/object"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ return nil
+ }))
+
+ var dPrm DeletePrm
+ dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2))
+ _, err = db.Delete(context.Background(), dPrm)
+ require.NoError(t, err)
+
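+ // deleting both objects must prune the attribute buckets entirely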
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+}
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index cb85157e7..c0762a377 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -18,7 +18,7 @@ import (
func TestDB_Delete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
parent := testutil.GenerateObjectWithCID(cnr)
@@ -40,12 +40,12 @@ func TestDB_Delete(t *testing.T) {
// inhume parent and child so they will be on graveyard
ts := testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object())
require.NoError(t, err)
ts = testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object())
require.NoError(t, err)
// delete object
@@ -65,7 +65,7 @@ func TestDB_Delete(t *testing.T) {
func TestDeleteAllChildren(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -103,12 +103,12 @@ func TestDeleteAllChildren(t *testing.T) {
func TestGraveOnlyDelete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
addr := oidtest.Address()
// inhume non-existent object by address
- require.NoError(t, metaInhume(db, addr, oidtest.Address()))
+ require.NoError(t, metaInhume(db, addr, oidtest.ID()))
// delete the object data
require.NoError(t, metaDelete(db, addr))
@@ -116,7 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) {
func TestExpiredObject(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
// removing expired object should be error-free
@@ -128,7 +128,7 @@ func TestExpiredObject(t *testing.T) {
func TestDelete(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
for range 10 {
@@ -170,7 +170,7 @@ func TestDelete(t *testing.T) {
func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 2e1b1dce8..7bd6f90a6 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -19,8 +19,8 @@ import (
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- addr oid.Address
- paddr oid.Address
+ addr oid.Address
+ ecParentAddr oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -36,9 +36,9 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) {
p.addr = addr
}
-// SetParent is an Exists option to set objects parent.
-func (p *ExistsPrm) SetParent(addr oid.Address) {
- p.paddr = addr
+// SetECParent is an Exists option to set the object's EC parent.
+func (p *ExistsPrm) SetECParent(addr oid.Address) {
+ p.ecParentAddr = addr
}
// Exists returns the fact that the object is in the metabase.
@@ -81,7 +81,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.exists, res.locked, err = db.exists(tx, prm.addr, prm.paddr, currEpoch)
+ res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch)
return err
})
@@ -89,10 +89,21 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
return res, metaerr.Wrap(err)
}
-func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpoch uint64) (bool, bool, error) {
+func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) {
var locked bool
- if !parent.Equals(oid.Address{}) {
- locked = objectLocked(tx, parent.Container(), parent.Object())
+ if !ecParent.Equals(oid.Address{}) {
+ st, err := objectStatus(tx, ecParent, currEpoch)
+ if err != nil {
+ return false, false, err
+ }
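+ // see objectStatus: 2 means covered with a tombstone, 3 means expired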
+ switch st {
+ case 2:
+ return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
+ case 3:
+ return false, locked, ErrObjectIsExpired
+ }
+
+ locked = objectLocked(tx, ecParent.Container(), ecParent.Object())
}
// check graveyard and object expiration first
st, err := objectStatus(tx, addr, currEpoch)
@@ -142,12 +153,16 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpo
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
+ return objectStatusWithCache(nil, tx, addr, currEpoch)
+}
+
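+// objectStatusWithCache is the cache-aware variant of objectStatus; a nil
+// bucketCache falls back to direct bucket lookups.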
+func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
// locked object could not be removed/marked with GC/expired
- if objectLocked(tx, addr.Container(), addr.Object()) {
+ if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) {
return 0, nil
}
- expired, err := isExpired(tx, addr, currEpoch)
+ expired, err := isExpiredWithCache(bc, tx, addr, currEpoch)
if err != nil {
return 0, err
}
@@ -156,8 +171,8 @@ func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, erro
return 3, nil
}
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
+ graveyardBkt := getGraveyardBucket(bc, tx)
+ garbageBkt := getGarbageBucket(bc, tx)
addrKey := addressKey(addr, make([]byte, addressKeySize))
return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
}
@@ -217,7 +232,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
err := splitInfo.Unmarshal(rawSplitInfo)
if err != nil {
- return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
+ return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
}
return splitInfo, nil
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 0087c1e31..3045e17f1 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"errors"
"testing"
@@ -18,7 +19,7 @@ const currEpoch = 1000
func TestDB_Exists(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("no object", func(t *testing.T) {
nonExist := testutil.GenerateObject()
@@ -37,7 +38,7 @@ func TestDB_Exists(t *testing.T) {
require.True(t, exists)
t.Run("removed object", func(t *testing.T) {
- err := metaInhume(db, object.AddressOf(regular), oidtest.Address())
+ err := metaInhume(db, object.AddressOf(regular), oidtest.ID())
require.NoError(t, err)
exists, err := metaExists(db, object.AddressOf(regular))
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
index 68144d8b1..a1351cb6f 100644
--- a/pkg/local_object_storage/metabase/expired.go
+++ b/pkg/local_object_storage/metabase/expired.go
@@ -74,9 +74,11 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
}
func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- bucketName := make([]byte, bucketKeySize)
- bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName)
- b := tx.Bucket(bucketName)
+ return isExpiredWithCache(nil, tx, addr, currEpoch)
+}
+
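+// isExpiredWithCache checks expiration via the expiration-epoch bucket,
+// optionally served from the bucket cache.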
+func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
+ b := getExpiredBucket(bc, tx, addr.Container())
if b == nil {
return false, nil
}
diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go
index bb98745ee..495c1eee7 100644
--- a/pkg/local_object_storage/metabase/expired_test.go
+++ b/pkg/local_object_storage/metabase/expired_test.go
@@ -13,7 +13,7 @@ import (
func TestDB_SelectExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
containerID1 := cidtest.ID()
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index 776f5d27c..821810c09 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -1,7 +1,6 @@
package meta
import (
- "bytes"
"context"
"fmt"
"time"
@@ -89,8 +88,12 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
+ return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
+}
+
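+// getWithCache performs the same lookup as get but may reuse buckets from bc.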
+func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
if checkStatus {
- st, err := objectStatus(tx, addr, currEpoch)
+ st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -110,12 +113,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
bucketName := make([]byte, bucketKeySize)
// check in primary index
- data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
- if len(data) != 0 {
- return obj, obj.Unmarshal(bytes.Clone(data))
+ if b := getPrimaryBucket(bc, tx, cnr); b != nil {
+ if data := b.Get(key); len(data) != 0 {
+ return obj, obj.Unmarshal(data)
+ }
}
- data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
+ data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
if len(data) != 0 {
return nil, getECInfoError(tx, cnr, data)
}
@@ -123,13 +127,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
// if not found then check in tombstone index
data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(bytes.Clone(data))
+ return obj, obj.Unmarshal(data)
}
// if not found then check in locker index
data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(bytes.Clone(data))
+ return obj, obj.Unmarshal(data)
}
// if not found then check if object is a virtual
@@ -187,7 +191,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
err = child.Unmarshal(data)
if err != nil {
- return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
+ return nil, fmt.Errorf("unmarshal child with parent: %w", err)
}
par := child.Parent()
@@ -216,10 +220,10 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error {
ecInfo := objectSDK.NewECInfo()
for _, key := range keys {
// check in primary index
- ojbData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
- if len(ojbData) != 0 {
+ objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
+ if len(objData) != 0 {
obj := objectSDK.New()
- if err := obj.Unmarshal(ojbData); err != nil {
+ if err := obj.Unmarshal(objData); err != nil {
return err
}
chunk := objectSDK.ECChunk{}
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 7654d2cd8..98c428410 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -25,7 +25,7 @@ import (
func TestDB_Get(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw := testutil.GenerateObject()
@@ -150,9 +150,8 @@ func TestDB_Get(t *testing.T) {
t.Run("get removed object", func(t *testing.T) {
obj := oidtest.Address()
- ts := oidtest.Address()
- require.NoError(t, metaInhume(db, obj, ts))
+ require.NoError(t, metaInhume(db, obj, oidtest.ID()))
_, err := metaGet(db, obj, false)
require.True(t, client.IsErrObjectAlreadyRemoved(err))
@@ -220,7 +219,6 @@ func benchmarkGet(b *testing.B, numOfObj int) {
meta.WithMaxBatchSize(batchSize),
meta.WithMaxBatchDelay(10*time.Millisecond),
)
- defer func() { require.NoError(b, db.Close()) }()
addrs := make([]oid.Address, 0, numOfObj)
for range numOfObj {
@@ -235,6 +233,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
}
db, addrs := prepareDb(runtime.NumCPU())
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("parallel", func(b *testing.B) {
b.ReportAllocs()
@@ -254,7 +253,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
})
})
- require.NoError(b, db.Close())
+ require.NoError(b, db.Close(context.Background()))
require.NoError(b, os.RemoveAll(b.Name()))
db, addrs = prepareDb(1)
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 80d40fb78..2f23d424c 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -176,7 +177,7 @@ type gcHandler struct {
func (g gcHandler) handleKV(k, _ []byte) error {
o, err := garbageFromKV(k)
if err != nil {
- return fmt.Errorf("could not parse garbage object: %w", err)
+ return fmt.Errorf("parse garbage object: %w", err)
}
return g.h(o)
@@ -189,7 +190,7 @@ type graveyardHandler struct {
func (g graveyardHandler) handleKV(k, v []byte) error {
o, err := graveFromKV(k, v)
if err != nil {
- return fmt.Errorf("could not parse grave: %w", err)
+ return fmt.Errorf("parse grave: %w", err)
}
return g.h(o)
@@ -239,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
func garbageFromKV(k []byte) (res GarbageObject, err error) {
err = decodeAddressFromKey(&res.addr, k)
if err != nil {
- err = fmt.Errorf("could not parse address: %w", err)
+ err = fmt.Errorf("parse address: %w", err)
}
return
@@ -255,46 +256,58 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) {
return
}
-// DropGraves deletes tombstoned objects from the
+// InhumeTombstones marks tombstones for GC removal and deletes tombstoned records from the
// graveyard bucket.
//
// Returns any error appeared during deletion process.
-func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error {
+func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("DropGraves", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.DropGraves")
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones")
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
- return ErrDegradedMode
+ return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
- return ErrReadOnlyMode
+ return InhumeRes{}, ErrReadOnlyMode
}
buf := make([]byte, addressKeySize)
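+ // tombstones themselves are inhumed with forced removal, which bypasses lock checks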
+ prm := InhumePrm{forceRemoval: true}
+ currEpoch := db.epochState.CurrentEpoch()
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(graveyardBucketName)
- if bkt == nil {
- return nil
+ var res InhumeRes
+
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)}
+
+ garbageBKT := tx.Bucket(garbageBucketName)
+ graveyardBKT := tx.Bucket(graveyardBucketName)
+
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
+ if err != nil {
+ return err
}
- for _, ts := range tss {
- err := bkt.Delete(addressKey(ts.Address(), buf))
- if err != nil {
+ for i := range tss {
+ if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil {
+ return err
+ }
+ if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil {
return err
}
}
return nil
})
+ return res, err
}
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index 75c7e2852..ebadecc04 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -7,6 +7,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -14,7 +17,7 @@ import (
func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
var counter int
var iterGravePRM meta.GraveyardIterationPrm
@@ -41,7 +44,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
func TestDB_Iterate_OffsetNotFound(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
obj1 := testutil.GenerateObject()
obj2 := testutil.GenerateObject()
@@ -112,13 +115,14 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
- obj3 := testutil.GenerateObject()
- obj4 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -138,6 +142,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
@@ -199,13 +204,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
- obj3 := testutil.GenerateObject()
- obj4 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -223,6 +229,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(
@@ -298,7 +305,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
func TestDB_IterateOverGarbage_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// generate and put 4 objects
obj1 := testutil.GenerateObject()
@@ -388,13 +395,14 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
require.False(t, iWasCalled)
}
-func TestDB_DropGraves(t *testing.T) {
+func TestDB_InhumeTombstones(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ cnr := cidtest.ID()
// generate and put 2 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -404,8 +412,20 @@ func TestDB_DropGraves(t *testing.T) {
err = putBig(db, obj2)
require.NoError(t, err)
- // inhume with tombstone
- addrTombstone := oidtest.Address()
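+ // build a real tombstone object covering both objects so that
+ // InhumeTombstones has an actual tombstone to bury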
+ id1, _ := obj1.ID()
+ id2, _ := obj2.ID()
+ ts := objectSDK.NewTombstone()
+ ts.SetMembers([]oid.ID{id1, id2})
+ objTs := objectSDK.New()
+ objTs.SetContainerID(cnr)
+ objTs.SetType(objectSDK.TypeTombstone)
+
+ data, _ := ts.Marshal()
+ objTs.SetPayload(data)
+ require.NoError(t, objectSDK.CalculateAndSetID(objTs))
+ require.NoError(t, putBig(db, objTs))
+
+ addrTombstone := object.AddressOf(objTs)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
@@ -428,8 +448,11 @@ func TestDB_DropGraves(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 2, counter)
- err = db.DropGraves(context.Background(), buriedTS)
+ res, err := db.InhumeTombstones(context.Background(), buriedTS)
require.NoError(t, err)
+ require.EqualValues(t, 1, res.LogicInhumed())
+ require.EqualValues(t, 0, res.UserInhumed())
+ require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID())
counter = 0
iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error {
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index b62accc43..76018fb61 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -143,6 +143,20 @@ func (p *InhumePrm) SetForceGCMark() {
p.forceRemoval = true
}
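+// validate checks that all target addresses share the tombstone's container:
+// a tombstone may bury only objects from its own container.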
+func (p *InhumePrm) validate() error {
+ if p == nil {
+ return nil
+ }
+ if p.tomb != nil {
+ for _, addr := range p.target {
+ if addr.Container() != p.tomb.Container() {
+ return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb)
+ }
+ }
+ }
+ return nil
+}
+
var errBreakBucketForEach = errors.New("bucket ForEach break")
// ErrLockObjectRemoval is returned when inhume operation is being
@@ -171,6 +185,10 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
+ if err := prm.validate(); err != nil {
+ return InhumeRes{}, err
+ }
+
if db.mode.NoMetabase() {
return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
@@ -181,13 +199,13 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
inhumedByCnrID: make(map[cid.ID]ObjectCounters),
}
currEpoch := db.epochState.CurrentEpoch()
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
return db.inhumeTx(tx, currEpoch, prm, &res)
})
success = err == nil
if success {
for _, addr := range prm.target {
- storagelog.Write(db.log,
+ storagelog.Write(ctx, db.log,
storagelog.AddressField(addr),
storagelog.OpField("metabase INHUME"))
}
@@ -199,85 +217,93 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes
garbageBKT := tx.Bucket(garbageBucketName)
graveyardBKT := tx.Bucket(graveyardBucketName)
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm)
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
if err != nil {
return err
}
buf := make([]byte, addressKeySize)
for i := range prm.target {
- id := prm.target[i].Object()
- cnr := prm.target[i].Container()
-
- // prevent locked objects to be inhumed
- if !prm.forceRemoval && objectLocked(tx, cnr, id) {
- return new(apistatus.ObjectLocked)
- }
-
- var lockWasChecked bool
-
- // prevent lock objects to be inhumed
- // if `Inhume` was called not with the
- // `WithForceGCMark` option
- if !prm.forceRemoval {
- if isLockObject(tx, cnr, id) {
- return ErrLockObjectRemoval
- }
-
- lockWasChecked = true
- }
-
- obj, err := db.get(tx, prm.target[i], buf, false, true, epoch)
- targetKey := addressKey(prm.target[i], buf)
- var ecErr *objectSDK.ECInfoError
- if err == nil {
- err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
- if err != nil {
- return err
- }
- } else if errors.As(err, &ecErr) {
- err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
- if err != nil {
- return err
- }
- }
-
- if prm.tomb != nil {
- var isTomb bool
- isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
- if err != nil {
- return err
- }
-
- if isTomb {
- continue
- }
- }
-
- // consider checking if target is already in graveyard?
- err = bkt.Put(targetKey, value)
- if err != nil {
+ if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil {
return err
}
-
- if prm.lockObjectHandling {
- // do not perform lock check if
- // it was already called
- if lockWasChecked {
- // inhumed object is not of
- // the LOCK type
- continue
- }
-
- if isLockObject(tx, cnr, id) {
- res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
- }
- }
}
return db.applyInhumeResToCounters(tx, res)
}
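+// inhumeTxSingle inhumes a single address within an open transaction;
+// it is shared by Inhume and InhumeTombstones.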
+func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error {
+ id := addr.Object()
+ cnr := addr.Container()
+ tx := bkt.Tx()
+
+ // prevent locked objects from being inhumed
+ if !prm.forceRemoval && objectLocked(tx, cnr, id) {
+ return new(apistatus.ObjectLocked)
+ }
+
+ var lockWasChecked bool
+
+ // prevent lock objects from being inhumed
+ // if `Inhume` was called without the
+ // `WithForceGCMark` option
+ if !prm.forceRemoval {
+ if isLockObject(tx, cnr, id) {
+ return ErrLockObjectRemoval
+ }
+
+ lockWasChecked = true
+ }
+
+ obj, err := db.get(tx, addr, buf, false, true, epoch)
+ targetKey := addressKey(addr, buf)
+ var ecErr *objectSDK.ECInfoError
+ if err == nil {
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
+ if err != nil {
+ return err
+ }
+ } else if errors.As(err, &ecErr) {
+ err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
+ if err != nil {
+ return err
+ }
+ }
+
+ if prm.tomb != nil {
+ var isTomb bool
+ isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
+ if err != nil {
+ return err
+ }
+
+ if isTomb {
+ return nil
+ }
+ }
+
+ // consider checking whether the target is already in the graveyard
+ err = bkt.Put(targetKey, value)
+ if err != nil {
+ return err
+ }
+
+ if prm.lockObjectHandling {
+ // do not perform lock check if
+ // it was already called
+ if lockWasChecked {
+ // inhumed object is not of
+ // the LOCK type
+ return nil
+ }
+
+ if isLockObject(tx, cnr, id) {
+ res.deletedLockObj = append(res.deletedLockObj, addr)
+ }
+ }
+ return nil
+}
+
func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes,
garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket,
ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte,
@@ -316,10 +342,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I
}
func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
- if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil {
+ if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil {
return err
}
- if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil {
+ if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil {
return err
}
@@ -336,7 +362,7 @@ func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
// 1. tombstone address if Inhume was called with
// a Tombstone
// 2. zeroValue if Inhume was called with a GC mark
-func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
+func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
if prm.tomb != nil {
targetBucket = graveyardBKT
tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
@@ -347,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
if data != nil {
err := targetBucket.Delete(tombKey)
if err != nil {
- return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
+ return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
}
}
@@ -359,11 +385,8 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
return targetBucket, value, nil
}
-func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) {
- targetIsTomb, err := isTomb(graveyardBKT, key)
- if err != nil {
- return false, err
- }
+func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) {
+ targetIsTomb := isTomb(graveyardBKT, addressKey)
// do not add grave if target is a tombstone
if targetIsTomb {
@@ -372,7 +395,7 @@ func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool
// if tombstone appears object must be
// additionally marked with GC
- return false, garbageBKT.Put(key, zeroValue)
+ return false, garbageBKT.Put(addressKey, zeroValue)
}
func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error {
@@ -392,25 +415,21 @@ func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Buc
return nil
}
-func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) {
+func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool {
targetIsTomb := false
// iterate over graveyard and check if target address
// is the address of tombstone in graveyard.
- err := graveyardBucket.ForEach(func(_, v []byte) error {
+ // the tombstone must have the same container ID as the target address
+ c := graveyardBucket.Cursor()
+ containerPrefix := addressKey[:cidSize]
+ for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() {
// check if graveyard has record with key corresponding
// to tombstone address (at least one)
- targetIsTomb = bytes.Equal(v, key)
-
+ targetIsTomb = bytes.Equal(v, addressKey)
if targetIsTomb {
- // break bucket iterator
- return errBreakBucketForEach
+ break
}
-
- return nil
- })
- if err != nil && !errors.Is(err, errBreakBucketForEach) {
- return false, err
}
- return targetIsTomb, nil
+ return targetIsTomb
}
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
index c3b1e72da..180713287 100644
--- a/pkg/local_object_storage/metabase/inhume_ec_test.go
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -25,8 +25,8 @@ func TestInhumeECObject(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close()) }()
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
@@ -35,14 +35,12 @@ func TestInhumeECObject(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
chunkObj.SetPayloadSize(uint64(5))
chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetContainerID(cnr)
chunkObj2.SetID(ecChunk2)
chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj2.SetPayloadSize(uint64(10))
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 163fbec2a..786d10396 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -9,6 +9,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -16,17 +17,15 @@ import (
func TestDB_Inhume(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw := testutil.GenerateObject()
testutil.AddAttribute(raw, "foo", "bar")
- tombstoneID := oidtest.Address()
-
err := putBig(db, raw)
require.NoError(t, err)
- err = metaInhume(db, object.AddressOf(raw), tombstoneID)
+ err = metaInhume(db, object.AddressOf(raw), oidtest.ID())
require.NoError(t, err)
_, err = metaExists(db, object.AddressOf(raw))
@@ -38,18 +37,25 @@ func TestDB_Inhume(t *testing.T) {
func TestInhumeTombOnTomb(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
var (
err error
+ cnr = cidtest.ID()
addr1 = oidtest.Address()
addr2 = oidtest.Address()
addr3 = oidtest.Address()
+ addr4 = oidtest.Address()
inhumePrm meta.InhumePrm
existsPrm meta.ExistsPrm
)
+ addr1.SetContainer(cnr)
+ addr2.SetContainer(cnr)
+ addr3.SetContainer(cnr)
+ addr4.SetContainer(cnr)
+
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(addr2)
@@ -84,7 +90,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
require.True(t, client.IsErrObjectAlreadyRemoved(err))
inhumePrm.SetAddresses(addr1)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ inhumePrm.SetTombstoneAddress(addr4)
// try to inhume addr1 (which is already a tombstone in graveyard)
_, err = db.Inhume(context.Background(), inhumePrm)
@@ -101,7 +107,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
func TestInhumeLocked(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
locked := oidtest.Address()
@@ -117,10 +123,13 @@ func TestInhumeLocked(t *testing.T) {
require.ErrorAs(t, err, &e)
}
-func metaInhume(db *meta.DB, target, tomb oid.Address) error {
+func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error {
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(target)
- inhumePrm.SetTombstoneAddress(tomb)
+ var tombAddr oid.Address
+ tombAddr.SetContainer(target.Container())
+ tombAddr.SetObject(tomb)
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err := db.Inhume(context.Background(), inhumePrm)
return err
diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go
index d44c51fb2..9cccd7dad 100644
--- a/pkg/local_object_storage/metabase/iterators.go
+++ b/pkg/local_object_storage/metabase/iterators.go
@@ -3,7 +3,6 @@ package meta
import (
"context"
"errors"
- "fmt"
"strconv"
"time"
@@ -12,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
@@ -111,70 +109,6 @@ func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler)
return nil
}
-// IterateCoveredByTombstones iterates over all objects in DB which are covered
-// by tombstone with string address from tss. Locked objects are not included
-// (do not confuse with objects of type LOCK).
-//
-// If h returns ErrInterruptIterator, nil returns immediately.
-// Returns other errors of h directly.
-//
-// Does not modify tss.
-func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- return db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateCoveredByTombstones(tx, tss, h)
- })
-}
-
-func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error {
- bktGraveyard := tx.Bucket(graveyardBucketName)
-
- err := bktGraveyard.ForEach(func(k, v []byte) error {
- var addr oid.Address
- if err := decodeAddressFromKey(&addr, v); err != nil {
- return err
- }
- if _, ok := tss[addr.EncodeToString()]; ok {
- var addr oid.Address
-
- err := decodeAddressFromKey(&addr, k)
- if err != nil {
- return fmt.Errorf("could not parse address of the object under tombstone: %w", err)
- }
-
- if objectLocked(tx, addr.Container(), addr.Object()) {
- return nil
- }
-
- return h(addr)
- }
-
- return nil
- })
-
- if errors.Is(err, ErrInterruptIterator) {
- err = nil
- }
-
- return err
-}
-
func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error {
var cid cid.ID
var oid oid.ID
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 54d56d923..4c9579965 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -5,10 +5,10 @@ import (
"strconv"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -17,7 +17,7 @@ import (
func TestDB_IterateExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const epoch = 13
@@ -66,60 +66,3 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt
return object2.AddressOf(obj)
}
-
-func TestDB_IterateCoveredByTombstones(t *testing.T) {
- db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
-
- ts := oidtest.Address()
- protected1 := oidtest.Address()
- protected2 := oidtest.Address()
- protectedLocked := oidtest.Address()
- garbage := oidtest.Address()
-
- var prm meta.InhumePrm
- var err error
-
- prm.SetAddresses(protected1, protected2, protectedLocked)
- prm.SetTombstoneAddress(ts)
-
- _, err = db.Inhume(context.Background(), prm)
- require.NoError(t, err)
-
- prm.SetAddresses(garbage)
- prm.SetGCMark()
-
- _, err = db.Inhume(context.Background(), prm)
- require.NoError(t, err)
-
- var handled []oid.Address
-
- tss := map[string]oid.Address{
- ts.EncodeToString(): ts,
- }
-
- err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
- handled = append(handled, addr)
- return nil
- })
- require.NoError(t, err)
-
- require.Len(t, handled, 3)
- require.Contains(t, handled, protected1)
- require.Contains(t, handled, protected2)
- require.Contains(t, handled, protectedLocked)
-
- err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
- require.NoError(t, err)
-
- handled = handled[:0]
-
- err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
- handled = append(handled, addr)
- return nil
- })
- require.NoError(t, err)
-
- require.Len(t, handled, 2)
- require.NotContains(t, handled, protectedLocked)
-}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index b4326a92c..2a0bd7f6a 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -1,6 +1,7 @@
package meta
import (
+ "bytes"
"context"
"time"
@@ -61,8 +62,33 @@ func (l ListRes) Cursor() *Cursor {
return l.cursor
}
+// IterateOverContainersPrm contains parameters for the IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler is executed for each container found in the metabase.
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for the IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // ObjectType is the type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID is the container whose objects are iterated over.
+ ContainerID cid.ID
+ // Handler is executed for each object found in the metabase.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
+// CountAliveObjectsInContainerPrm contains parameters for the CountAliveObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType is the type of objects to count.
+ ObjectType objectSDK.Type
+ // ContainerID is the container whose objects are counted.
+ ContainerID cid.ID
+}
+
// ListWithCursor lists physical objects available in metabase starting from
-// cursor. Includes objects of all types. Does not include inhumed objects.
+// cursor. Includes objects of all types. Does not include inhumed or expired
+// objects.
// Use cursor value from response for consecutive requests.
//
// Returns ErrEndOfListing if there are no more objects to return or count
@@ -113,11 +139,12 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
var containerID cid.ID
var offset []byte
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
+ bc := newBucketCache()
rawAddr := make([]byte, cidSize, addressKeySize)
+ currEpoch := db.epochState.CurrentEpoch()
+
loop:
for ; name != nil; name, _ = c.Next() {
cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name)
@@ -141,8 +168,8 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
- result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
- result, count, cursor, threshold)
+ result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
+ result, count, cursor, threshold, currEpoch)
if err != nil {
return nil, nil, err
}
@@ -160,8 +187,7 @@ loop:
if offset != nil {
// new slice is much faster but less memory efficient
// we need to copy, because offset exists during bbolt tx
- cursor.inBucketOffset = make([]byte, len(offset))
- copy(cursor.inBucketOffset, offset)
+ cursor.inBucketOffset = bytes.Clone(offset)
}
if len(result) == 0 {
@@ -170,29 +196,29 @@ loop:
// new slice is much faster but less memory efficient
// we need to copy, because bucketName exists during bbolt tx
- cursor.bucketName = make([]byte, len(bucketName))
- copy(cursor.bucketName, bucketName)
+ cursor.bucketName = bytes.Clone(bucketName)
return result, cursor, nil
}
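
`listWithCursor` now threads a `bucketCache` (via `newBucketCache`) instead of resolving the graveyard and garbage buckets up front. The cache type itself is defined elsewhere in this patch; the following shape is only an assumption of what `getGraveyardBucket` and friends memoize:

```go
// Assumed sketch, inside the meta package: resolve each well-known bucket
// at most once per read transaction. Field set and behaviour are guesses
// consistent with the call sites in this patch, not the real definition.
type bucketCache struct {
	graveyard *bbolt.Bucket
	garbage   *bbolt.Bucket
	locked    *bbolt.Bucket
}

func newBucketCache() *bucketCache { return &bucketCache{} }

func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
	if bc == nil { // callers such as objectLocked pass nil to bypass caching
		return tx.Bucket(graveyardBucketName)
	}
	if bc.graveyard == nil {
		bc.graveyard = tx.Bucket(graveyardBucketName)
	}
	return bc.graveyard
}
```
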
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
-func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
+func selectNFromBucket(
+ bc *bucketCache,
+ bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
- graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
to []objectcore.Info, // listing result
limit int, // stop listing at `limit` items in result
cursor *Cursor, // start from cursor object
threshold bool, // ignore cursor and start immediately
+ currEpoch uint64,
) ([]objectcore.Info, []byte, *Cursor, error) {
if cursor == nil {
cursor = new(Cursor)
}
- count := len(to)
c := bkt.Cursor()
k, v := c.First()
@@ -204,7 +230,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
}
for ; k != nil; k, v = c.Next() {
- if count >= limit {
+ if len(to) >= limit {
break
}
@@ -214,17 +240,25 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
}
offset = k
+ graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
+ garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return nil, nil, nil, err
+ }
+
+ expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
+ if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
+ continue
+ }
+
var isLinkingObj bool
var ecInfo *objectcore.ECInfo
if objType == objectSDK.TypeRegular {
- var o objectSDK.Object
- if err := o.Unmarshal(v); err != nil {
- return nil, nil, nil, err
- }
isLinkingObj = isLinkObject(&o)
ecHeader := o.ECHeader()
if ecHeader != nil {
@@ -240,7 +274,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
a.SetContainer(cnt)
a.SetObject(obj)
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
- count++
}
return to, offset, cursor, nil
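
The loop above now also hides expired objects from listings. The added predicate in isolation, as a sketch inside the package built from `hasExpirationEpoch` and `objectLockedWithCache` exactly as this patch uses them (`skipAsExpired` is a hypothetical name):

```go
// skipAsExpired mirrors the listing filter added above: an object drops
// out of listings once its expiration epoch is behind currEpoch, unless
// a LOCK object pins it.
func skipAsExpired(bc *bucketCache, tx *bbolt.Tx, o *objectSDK.Object, cnt cid.ID, obj oid.ID, currEpoch uint64) bool {
	expEpoch, ok := hasExpirationEpoch(o)
	return ok && expEpoch < currEpoch && !objectLockedWithCache(bc, tx, cnt, obj)
}
```
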
@@ -259,3 +292,211 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte)
return rawID, name[0]
}
+
+// IterateOverContainers iterates over all containers that have physical objects in the metabase, starting from the first one.
+func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverContainers(ctx, tx, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error {
+ var containerID cid.ID
+ for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} {
+ c := tx.Cursor()
+ for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() {
+ cidRaw, _ := parseContainerIDWithPrefix(&containerID, name)
+ if cidRaw == nil {
+ continue
+ }
+ var cnt cid.ID
+ copy(cnt[:], containerID[:])
+ var objType objectSDK.Type
+ switch prefix[0] {
+ case primaryPrefix:
+ objType = objectSDK.TypeRegular
+ case lockersPrefix:
+ objType = objectSDK.TypeLock
+ case tombstonePrefix:
+ objType = objectSDK.TypeTombstone
+ default:
+ continue
+ }
+ err := prm.Handler(ctx, objType, cnt)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer iterates over physical objects of the requested type in the given container, starting from the first one.
+func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverObjectsInContainer(ctx, tx, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error {
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
+ return nil
+ }
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
+
+ bkt := tx.Bucket(bucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, v := c.First()
+
+ for ; k != nil; k, v = c.Next() {
+ var obj oid.ID
+ if err := obj.Decode(k); err != nil {
+ break
+ }
+
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+
+ var isLinkingObj bool
+ var ecInfo *objectcore.ECInfo
+ if prm.ObjectType == objectSDK.TypeRegular {
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return err
+ }
+ isLinkingObj = isLinkObject(&o)
+ ecHeader := o.ECHeader()
+ if ecHeader != nil {
+ ecInfo = &objectcore.ECInfo{
+ ParentID: ecHeader.Parent(),
+ Index: ecHeader.Index(),
+ Total: ecHeader.Total(),
+ }
+ }
+ }
+
+ var a oid.Address
+ a.SetContainer(prm.ContainerID)
+ a.SetObject(obj)
+ objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
+ err := prm.Handler(ctx, &objInfo)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CountAliveObjectsInContainer counts objects in the container bucket that are neither in the graveyard nor marked as garbage.
+func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("CountAliveObjectsInContainer", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInContainer")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
+ return 0, nil
+ }
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
+ var count uint64
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(bucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, _ := c.First()
+ for ; k != nil; k, _ = c.Next() {
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+ count++
+ }
+ return nil
+ })
+ success = err == nil
+ return count, metaerr.Wrap(err)
+}
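
Taken together, the three additions above compose into a full scan of everything alive in the metabase. A usage sketch; `scanAlive` is a hypothetical helper, all `meta` types and handler signatures are as defined in this patch:

```go
package example

import (
	"context"
	"fmt"

	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// scanAlive walks every (type, container) pair, counts alive objects,
// then visits each one.
func scanAlive(ctx context.Context, db *meta.DB) error {
	var cPrm meta.IterateOverContainersPrm
	cPrm.Handler = func(ctx context.Context, typ objectSDK.Type, cnr cid.ID) error {
		cnt, err := db.CountAliveObjectsInContainer(ctx, meta.CountAliveObjectsInContainerPrm{
			ObjectType:  typ,
			ContainerID: cnr,
		})
		if err != nil {
			return err
		}
		fmt.Printf("%s/%v: %d alive\n", cnr.EncodeToString(), typ, cnt)

		var oPrm meta.IterateOverObjectsInContainerPrm
		oPrm.ObjectType = typ
		oPrm.ContainerID = cnr
		oPrm.Handler = func(_ context.Context, info *objectcore.Info) error {
			_ = info // address, EC info, and linking flag are available here
			return nil
		}
		return db.IterateOverObjectsInContainer(ctx, oPrm)
	}
	return db.IterateOverContainers(ctx, cPrm)
}
```
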
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index 6207497b1..02985991c 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -3,13 +3,17 @@ package meta_test
import (
"context"
"errors"
+ "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
@@ -17,6 +21,8 @@ import (
func BenchmarkListWithCursor(b *testing.B) {
db := listWithCursorPrepareDB(b)
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
+
b.Run("1 item", func(b *testing.B) {
benchmarkListWithCursor(b, db, 1)
})
@@ -32,7 +38,6 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
NoSync: true,
})) // faster single-thread generation
- defer func() { require.NoError(b, db.Close()) }()
obj := testutil.GenerateObject()
for i := range 100_000 { // should be a multiple of all batch sizes
@@ -54,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
- if err != meta.ErrEndOfListing {
+ if !errors.Is(err, meta.ErrEndOfListing) {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)
@@ -69,14 +74,16 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
func TestLisObjectsWithCursor(t *testing.T) {
t.Parallel()
- db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
-
const (
+ currEpoch = 100
+ expEpoch = currEpoch - 1
containers = 5
- total = containers * 4 // regular + ts + child + lock
+ total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
)
+ db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
expected := make([]object.Info, 0, total)
// fill metabase with objects
@@ -110,7 +117,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
ts := testutil.GenerateObjectWithCID(containerID)
- err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object())
require.NoError(t, err)
// add one child object (do not include parent into expected)
@@ -125,6 +132,26 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, child)
require.NoError(t, err)
expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
+
+ // add expired object (do not include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+
+ // add non-expired object (include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
+
+ // add locked expired object (include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ objID := oidtest.ID()
+ obj.SetID(objID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+ require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
}
t.Run("success with various count", func(t *testing.T) {
@@ -162,7 +189,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const total = 5
@@ -219,3 +246,59 @@ func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]objec
r, err := db.ListWithCursor(context.Background(), listPrm)
return r.AddressList(), r.Cursor(), err
}
+
+func TestIterateOver(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ const total uint64 = 5
+ for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
+ var expected []*objectSDK.Object
+ // fill metabase with objects
+ cid := cidtest.ID()
+ for range total {
+ obj := testutil.GenerateObjectWithCID(cid)
+ obj.SetType(typ)
+ err := metaPut(db, obj, nil)
+ require.NoError(t, err)
+ expected = append(expected, obj)
+ }
+
+ var metaIter meta.IterateOverObjectsInContainerPrm
+ var count uint64
+ metaIter.Handler = func(context.Context, *object.Info) error {
+ count++
+ return nil
+ }
+ metaIter.ContainerID = cid
+ metaIter.ObjectType = typ
+ err := db.IterateOverObjectsInContainer(context.Background(), metaIter)
+ require.NoError(t, err)
+ require.Equal(t, total, count)
+
+ var metaCount meta.CountAliveObjectsInContainerPrm
+ metaCount.ContainerID = cid
+ metaCount.ObjectType = typ
+ res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, total, res)
+
+ err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1]))
+ require.NoError(t, err)
+
+ res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), res)
+ }
+ var count int
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error {
+ count++
+ return nil
+ }
+ err := db.IterateOverContainers(context.Background(), metaPrm)
+ require.NoError(t, err)
+ require.Equal(t, 3, count)
+}
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index 732ba426d..f4cb9e53b 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -4,8 +4,10 @@ import (
"bytes"
"context"
"fmt"
+ "slices"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -62,9 +64,7 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.
return ErrReadOnlyMode
}
- if len(locked) == 0 {
- panic("empty locked list")
- }
+ assert.False(len(locked) == 0, "empty locked list")
err := db.lockInternal(locked, cnr, locker)
success = err == nil
@@ -78,7 +78,7 @@ func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error {
}
key := make([]byte, cidSize)
- return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error {
if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular {
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
}
@@ -143,7 +143,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
var unlockedObjects []oid.Address
- if err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ if err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
for i := range lockers {
unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object())
if err != nil {
@@ -162,7 +162,11 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
// checks if specified object is locked in the specified container.
func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- bucketLocked := tx.Bucket(bucketNameLocked)
+ return objectLockedWithCache(nil, tx, idCnr, idObj)
+}
+
+func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
+ bucketLocked := getLockedBucket(bc, tx)
if bucketLocked != nil {
key := make([]byte, cidSize)
idCnr.Encode(key)
@@ -176,7 +180,7 @@ func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
}
// return `LOCK` id's if specified object is locked in the specified container.
-func getLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
+func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
var lockers []oid.ID
bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
@@ -250,7 +254,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
unlockedObjects = append(unlockedObjects, addr)
} else {
// exclude locker
- keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
+ keyLockers = slices.Delete(keyLockers, i, i+1)
v, err = encodeList(keyLockers)
if err != nil {
@@ -351,20 +355,20 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e
return res, err
}
-// GetLocked return `LOCK` id's if provided object is locked by any `LOCK`. Not found
+// GetLocks returns `LOCK` IDs if the provided object is locked by any `LOCK`. Not found
// object is considered as non-locked.
//
// Returns only non-logical errors related to underlying database.
-func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
+func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("GetLocked", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocked",
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
@@ -377,7 +381,7 @@ func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, er
return res, ErrDegradedMode
}
err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res, err = getLocked(tx, addr.Container(), addr.Object())
+ res, err = getLocks(tx, addr.Container(), addr.Object())
return nil
}))
success = err == nil
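
With `GetLocked` renamed to `GetLocks`, the read side looks like this. A minimal sketch; `printLocks` is a hypothetical helper around the exported API above:

```go
package example

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// printLocks lists the LOCK objects currently pinning addr; per the
// contract above, an absent object simply yields an empty slice.
func printLocks(ctx context.Context, db *meta.DB, addr oid.Address) error {
	lockIDs, err := db.GetLocks(ctx, addr)
	if err != nil {
		return err
	}
	for _, id := range lockIDs {
		fmt.Println("locked by", id)
	}
	return nil
}
```
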
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 62a109b02..341ff9ad1 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -21,7 +21,7 @@ func TestDB_Lock(t *testing.T) {
cnr := cidtest.ID()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("empty locked list", func(t *testing.T) {
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
@@ -73,7 +73,9 @@ func TestDB_Lock(t *testing.T) {
_, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
@@ -89,7 +91,9 @@ func TestDB_Lock(t *testing.T) {
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
})
@@ -103,7 +107,7 @@ func TestDB_Lock(t *testing.T) {
var objLockedErr *apistatus.ObjectLocked
// try to inhume locked object using tombstone
- err := metaInhume(db, objAddr, lockAddr)
+ err := metaInhume(db, objAddr, lockAddr.Object())
require.ErrorAs(t, err, &objLockedErr)
// free locked object
@@ -183,7 +187,7 @@ func TestDB_Lock_Expired(t *testing.T) {
es := &epochState{e: 123}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// put an object
addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)
@@ -205,7 +209,7 @@ func TestDB_IsLocked(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// existing and locked objs
diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go
index 2032ed6b2..7edb96384 100644
--- a/pkg/local_object_storage/metabase/mode.go
+++ b/pkg/local_object_storage/metabase/mode.go
@@ -1,6 +1,7 @@
package meta
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -8,7 +9,7 @@ import (
// SetMode sets the metabase mode of operation.
// If the mode assumes no operation metabase, the database is closed.
-func (db *DB) SetMode(m mode.Mode) error {
+func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
@@ -17,20 +18,20 @@ func (db *DB) SetMode(m mode.Mode) error {
}
if !db.mode.NoMetabase() {
- if err := db.Close(); err != nil {
- return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ if err := db.Close(ctx); err != nil {
+ return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
if m.NoMetabase() {
db.boltDB = nil
} else {
- err := db.openDB(m)
+ err := db.openDB(ctx, m)
if err == nil && !m.ReadOnly() {
- err = db.Init()
+ err = db.Init(ctx)
}
if err != nil {
- return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go
index 1b9f60055..28b42283f 100644
--- a/pkg/local_object_storage/metabase/mode_test.go
+++ b/pkg/local_object_storage/metabase/mode_test.go
@@ -25,13 +25,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init())
+ require.NoError(t, bdb.Init(context.Background()))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close())
+ require.NoError(t, bdb.Close(context.Background()))
require.NoError(t, bdb.Open(context.Background(), mode.Degraded))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init())
+ require.NoError(t, bdb.Init(context.Background()))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close())
+ require.NoError(t, bdb.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index ff79a0387..5e1bbfe9e 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -9,12 +9,12 @@ import (
"strconv"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -35,6 +35,8 @@ type PutPrm struct {
obj *objectSDK.Object
id []byte
+
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
@@ -52,10 +54,13 @@ func (p *PutPrm) SetStorageID(id []byte) {
p.id = id
}
+// SetIndexAttributes enables or disables attribute indexing for the object being put.
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
var (
- ErrUnknownObjectType = errors.New("unknown object type")
- ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
- ErrIncorrectRootObject = errors.New("invalid root object")
+ ErrUnknownObjectType = errors.New("unknown object type")
+ ErrIncorrectRootObject = errors.New("invalid root object")
)
// Put saves object header in metabase. Object payload expected to be cut.
@@ -90,12 +95,12 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
var e error
- res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch)
+ res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes)
return e
})
if err == nil {
success = true
- storagelog.Write(db.log,
+ storagelog.Write(ctx, db.log,
storagelog.AddressField(objectCore.AddressOf(prm.obj)),
storagelog.OpField("metabase PUT"))
}
@@ -108,15 +113,22 @@ func (db *DB) put(tx *bbolt.Tx,
id []byte,
si *objectSDK.SplitInfo,
currEpoch uint64,
+ indexAttributes bool,
) (PutRes, error) {
cnr, ok := obj.ContainerID()
if !ok {
return PutRes{}, errors.New("missing container in object")
}
+ var ecParentAddress oid.Address
+ if ecHeader := obj.ECHeader(); ecHeader != nil {
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ecHeader.Parent())
+ }
+
isParent := si != nil
- exists, _, err := db.exists(tx, objectCore.AddressOf(obj), oid.Address{}, currEpoch)
+ exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch)
var splitInfoError *objectSDK.SplitInfoError
if errors.As(err, &splitInfoError) {
@@ -129,7 +141,7 @@ func (db *DB) put(tx *bbolt.Tx,
return PutRes{}, db.updateObj(tx, obj, id, si, isParent)
}
- return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch)
+ return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes)
}
func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error {
@@ -152,14 +164,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje
return nil
}
-func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error {
+func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error {
if par := obj.Parent(); par != nil && !isParent { // limit depth by two
parentSI, err := splitInfoFromObject(obj)
if err != nil {
return err
}
- _, err = db.put(tx, par, id, parentSI, currEpoch)
+ _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes)
if err != nil {
return err
}
@@ -167,12 +179,19 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
- return fmt.Errorf("can't put unique indexes: %w", err)
+ return fmt.Errorf("put unique indexes: %w", err)
}
err = updateListIndexes(tx, obj, putListIndexItem)
if err != nil {
- return fmt.Errorf("can't put list indexes: %w", err)
+ return fmt.Errorf("put list indexes: %w", err)
+ }
+
+ if indexAttributes {
+ err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("put fake bucket tree indexes: %w", err)
+ }
}
// update container volume size estimation
@@ -230,7 +249,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad
}
rawObject, err := obj.CutPayload().Marshal()
if err != nil {
- return fmt.Errorf("can't marshal object header: %w", err)
+ return fmt.Errorf("marshal object header: %w", err)
}
return putUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
@@ -381,16 +400,56 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
return nil
}
+var indexedAttributes = map[string]struct{}{
+ "S3-Access-Box-CRDT-Name": {},
+ objectSDK.AttributeFilePath: {},
+}
+
+// IsAtrributeIndexed returns true if the attribute is indexed by the metabase.
+func IsAtrributeIndexed(attr string) bool {
+ _, found := indexedAttributes[attr]
+ return found
+}
+
+func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
+ id, _ := obj.ID()
+ cnr, _ := obj.ContainerID()
+ objKey := objectKey(id, make([]byte, objectKeySize))
+
+ key := make([]byte, bucketKeySize)
+ var attrs []objectSDK.Attribute
+ if obj.ECHeader() != nil {
+ attrs = obj.ECHeader().ParentAttributes()
+ objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
+ } else {
+ attrs = obj.Attributes()
+ }
+
+ // user specified attributes
+ for i := range attrs {
+ if !IsAtrributeIndexed(attrs[i].Key()) {
+ continue
+ }
+ key = attributeBucketName(cnr, attrs[i].Key(), key)
+ err := f(tx, namedBucketItem{
+ name: key,
+ key: []byte(attrs[i].Value()),
+ val: objKey,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
attributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
attributes = ech.ParentAttributes()
}
for _, attr := range attributes {
- if attr.Key() == objectV2.SysAttributeExpEpochNeoFS {
- expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
- return expEpoch, err == nil
- }
if attr.Key() == objectV2.SysAttributeExpEpoch {
expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
return expEpoch, err == nil
@@ -415,7 +474,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck
func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
data, err := update(bkt.Get(item.key), item.val)
@@ -429,22 +488,36 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil })
}
+func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt, err := createBucketLikelyExists(tx, item.name)
+ if err != nil {
+ return fmt.Errorf("create index %v: %w", item.name, err)
+ }
+
+ fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
+ if err != nil {
+ return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
+ }
+
+ return fkbtRoot.Put(item.val, zeroValue)
+}
+
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
lst, err := decodeList(bkt.Get(item.key))
if err != nil {
- return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("decode leaf list %v: %w", item.key, err)
}
lst = append(lst, item.val)
encodedLst, err := encodeList(lst)
if err != nil {
- return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("encode leaf list %v: %w", item.key, err)
}
return bkt.Put(item.key, encodedLst)
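
The new attribute index is opt-in per Put. A usage sketch (`putIndexed` is a hypothetical helper); only attributes listed in `indexedAttributes` above — `FilePath` and the S3 access-box CRDT name — actually reach the fake bucket tree:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// putIndexed stores an object header with attribute indexing switched on,
// routing indexed attributes through updateFKBTIndexes.
func putIndexed(ctx context.Context, db *meta.DB, obj *objectSDK.Object) error {
	var prm meta.PutPrm
	prm.SetObject(obj)
	prm.SetIndexAttributes(true)
	_, err := db.Put(ctx, prm)
	return err
}
```
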
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index 914f5ef06..f37ed4cf2 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -46,7 +46,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(runtime.NumCPU()))
- defer func() { require.NoError(b, db.Close()) }()
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
b.SetParallelism(1)
@@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(1))
- defer func() { require.NoError(b, db.Close()) }()
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
var index atomic.Int64
index.Store(-1)
objs := prepareObjects(b.N)
@@ -84,7 +84,7 @@ func BenchmarkPut(b *testing.B) {
func TestDB_PutBlobovniczaUpdate(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw1 := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
index 993079dce..5f0956f0b 100644
--- a/pkg/local_object_storage/metabase/reset_test.go
+++ b/pkg/local_object_storage/metabase/reset_test.go
@@ -30,14 +30,14 @@ func TestResetDropsContainerBuckets(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
for idx := range 100 {
var putPrm PutPrm
putPrm.SetObject(testutil.GenerateObject())
- putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
+ putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
_, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index ed43fc41f..60da50671 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -8,17 +8,15 @@ import (
"strings"
"time"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
)
type (
@@ -37,8 +35,9 @@ type (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ useAttributeIndex bool
}
// SelectRes groups the resulting values of Select operation.
@@ -56,6 +55,10 @@ func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
+// SetUseAttributeIndex allows Select to serve indexed attribute filters from the attribute index.
+func (p *SelectPrm) SetUseAttributeIndex(v bool) {
+ p.useAttributeIndex = v
+}
+
// AddressList returns list of addresses of the selected objects.
func (r SelectRes) AddressList() []oid.Address {
return r.addrList
@@ -92,14 +95,14 @@ func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err err
currEpoch := db.epochState.CurrentEpoch()
return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch)
+ res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex)
success = err == nil
return err
}))
}
-func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64) ([]oid.Address, error) {
- group, err := groupFilters(fs)
+func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) {
+ group, err := groupFilters(fs, useAttributeIndex)
if err != nil {
return nil, err
}
@@ -128,6 +131,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
res := make([]oid.Address, 0, len(mAddr))
+ bc := newBucketCache()
for a, ind := range mAddr {
if ind != expLen {
continue // ignore objects with unmatched fast filters
@@ -142,7 +146,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(id)
- st, err := objectStatus(tx, addr, currEpoch)
+ st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -150,7 +154,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
continue // ignore removed objects
}
- addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
+ addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
if !match {
continue // ignore objects with unmatched slow filters
}
@@ -218,7 +222,13 @@ func (db *DB) selectFastFilter(
selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum)
- default:
+ default: // user attribute
+ bucketName := attributeBucketName(cnr, f.Header(), bucketName)
+ if f.Operation() == objectSDK.MatchNotPresent {
+ selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum)
+ } else {
+ db.selectFromFKBT(tx, bucketName, f, to, fNum)
+ }
}
}
@@ -228,6 +238,15 @@ var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{
v2object.TypeLock.String(): {bucketNameLockers},
}
+func allBucketNames(cnr cid.ID) (names [][]byte) {
+ for _, fns := range mBucketNaming {
+ for _, fn := range fns {
+ names = append(names, fn(cnr, make([]byte, bucketKeySize)))
+ }
+ }
+ return
+}
+
func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) {
appendNames := func(key string) {
fns, ok := mBucketNaming[key]
@@ -259,6 +278,76 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str
return
}
+func (db *DB) selectFromFKBT(
+ tx *bbolt.Tx,
+ name []byte, // fkbt root bucket name
+ f objectSDK.SearchFilter, // filter for operation and value
+ to map[string]int, // resulting cache
+ fNum int, // index of filter
+) {
+ matchFunc, ok := db.matchers[f.Operation()]
+ if !ok {
+ return
+ }
+
+ fkbtRoot := tx.Bucket(name)
+ if fkbtRoot == nil {
+ return
+ }
+
+ _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
+ fkbtLeaf := fkbtRoot.Bucket(k)
+ if fkbtLeaf == nil {
+ return nil
+ }
+
+ return fkbtLeaf.ForEach(func(k, _ []byte) error {
+ markAddressInCache(to, fNum, string(k))
+
+ return nil
+ })
+ })
+}
+
+// selectOutsideFKBT iterates over all incl buckets and adds to the resulting
+// cache the addresses that are absent from the fkbt root bucket.
+func selectOutsideFKBT(
+ tx *bbolt.Tx,
+ incl [][]byte, // buckets
+ name []byte, // fkbt root bucket name
+ to map[string]int, // resulting cache
+ fNum int, // index of filter
+) {
+ mExcl := make(map[string]struct{})
+
+ bktExcl := tx.Bucket(name)
+ if bktExcl != nil {
+ _ = bktExcl.ForEachBucket(func(k []byte) error {
+ exclBktLeaf := bktExcl.Bucket(k)
+ return exclBktLeaf.ForEach(func(k, _ []byte) error {
+ mExcl[string(k)] = struct{}{}
+
+ return nil
+ })
+ })
+ }
+
+ for i := range incl {
+ bktIncl := tx.Bucket(incl[i])
+ if bktIncl == nil {
+ continue
+ }
+
+ _ = bktIncl.ForEach(func(k, _ []byte) error {
+ if _, ok := mExcl[string(k)]; !ok {
+ markAddressInCache(to, fNum, string(k))
+ }
+
+ return nil
+ })
+ }
+}
+
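
For reference, the fake bucket tree read by `selectFromFKBT` (and written by `putFKBTIndexItem` earlier in this patch) is a two-level layout. A sketch inside the package; `objectsWithAttrValue` is a hypothetical helper illustrating the shape, not part of the patch:

```go
// FKBT layout, as used by putFKBTIndexItem / selectFromFKBT:
//
//   attributeBucketName(cnr, attrKey)   root bucket per (container, attribute)
//     └── []byte(attrValue)             nested bucket per attribute value
//           └── objectKey -> zeroValue  leaf entry per object
//
// An exact-value lookup therefore opens two buckets and lists keys.
func objectsWithAttrValue(tx *bbolt.Tx, rootName, value []byte, visit func(objKey []byte)) {
	fkbtRoot := tx.Bucket(rootName)
	if fkbtRoot == nil {
		return
	}
	leaf := fkbtRoot.Bucket(value)
	if leaf == nil {
		return
	}
	_ = leaf.ForEach(func(k, _ []byte) error {
		visit(k)
		return nil
	})
}
```
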
// selectFromList looks into index to find list of addresses to add in
// resulting cache.
func (db *DB) selectFromList(
@@ -282,24 +371,17 @@ func (db *DB) selectFromList(
case objectSDK.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
- db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))
-
return
}
if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error {
l, err := decodeList(val)
if err != nil {
- db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
- zap.String("error", err.Error()),
- )
-
return err
}
@@ -307,10 +389,6 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
- db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
- zap.String("error", err.Error()),
- )
-
return
}
}
@@ -352,10 +430,6 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug(logs.MetabaseUnknownOperation,
- zap.Uint32("operation", uint32(f.Operation())),
- )
-
return
}
@@ -366,31 +440,25 @@ func (db *DB) selectObjectID(
return
}
- err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
+ _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
var id oid.ID
if err := id.Decode(k); err == nil {
appendOID(id)
}
return nil
})
- if err != nil {
- db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
- zap.String("error", err.Error()),
- )
- }
}
}
}
// matchSlowFilters return true if object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
result := addr
if len(f) == 0 {
return result, true
}
- buf := make([]byte, addressKeySize)
- obj, err := db.get(tx, addr, buf, true, false, currEpoch)
+ obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
if err != nil {
return result, false
}
@@ -401,17 +469,26 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
case v2object.FilterHeaderVersion:
data = []byte(obj.Version().String())
case v2object.FilterHeaderHomomorphicHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent hashes are incomparable
+ }
cs, _ := obj.PayloadHomomorphicHash()
data = cs.Value()
case v2object.FilterHeaderCreationEpoch:
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.CreationEpoch())
case v2object.FilterHeaderPayloadLength:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload lengths are incomparable
+ }
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
case v2object.FilterHeaderOwnerID:
data = []byte(obj.OwnerID().EncodeToString())
case v2object.FilterHeaderPayloadHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload hashes are incomparable
+ }
cs, _ := obj.PayloadChecksum()
data = cs.Value()
default: // user attribute
@@ -439,6 +516,29 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
return result, true
}
+func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
+ buf := make([]byte, addressKeySize)
+ obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
+ if err != nil {
+ var ecInfoError *objectSDK.ECInfoError
+ if errors.As(err, &ecInfoError) {
+ for _, chunk := range ecInfoError.ECInfo().Chunks {
+ var objID oid.ID
+ if err = objID.ReadFromV2(chunk.ID); err != nil {
+ continue
+ }
+ addr.SetObject(objID)
+ obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
+ if err == nil {
+ return obj, true, nil
+ }
+ }
+ }
+ return nil, false, err
+ }
+ return obj, false, nil
+}
+
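
The chunk fallback above works because a stored EC chunk carries the parent's searchable attributes in its EC header, while its own payload size and checksums describe only the chunk — hence the `isECChunk` short-circuits in `matchSlowFilters`. The fallback decision in a reusable shape, as a sketch inside the package (`firstStoredChunkID` is a hypothetical name):

```go
// firstStoredChunkID extracts the first readable chunk ID from an
// ECInfoError, mirroring the loop in getObjectForSlowFilters; the caller
// then re-reads the object at that ID with raw=true.
func firstStoredChunkID(err error) (oid.ID, bool) {
	var ecErr *objectSDK.ECInfoError
	if !errors.As(err, &ecErr) {
		return oid.ID{}, false
	}
	for _, chunk := range ecErr.ECInfo().Chunks {
		var id oid.ID
		if e := id.ReadFromV2(chunk.ID); e == nil {
			return id, true
		}
	}
	return oid.ID{}, false
}
```
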
func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
objectAttributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
@@ -455,7 +555,7 @@ func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
// groupFilters divides filters in two groups: fast and slow. Fast filters
// processed by indexes and slow filters processed after by unmarshaling
// object headers.
-func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
+func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) {
res := filterGroup{
fastFilters: make(objectSDK.SearchFilters, 0, len(filters)),
slowFilters: make(objectSDK.SearchFilters, 0, len(filters)),
@@ -466,7 +566,7 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
case v2object.FilterHeaderContainerID: // support deprecated field
err := res.cnr.DecodeString(filters[i].Value())
if err != nil {
- return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
+ return filterGroup{}, fmt.Errorf("parse container id: %w", err)
}
res.withCnrFilter = true
@@ -480,7 +580,11 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
v2object.FilterPropertyPhy:
res.fastFilters = append(res.fastFilters, filters[i])
default:
- res.slowFilters = append(res.slowFilters, filters[i])
+ if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) {
+ res.fastFilters = append(res.fastFilters, filters[i])
+ } else {
+ res.slowFilters = append(res.slowFilters, filters[i])
+ }
}
}
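Reviewer note: the new useAttributeIndex branch routes indexed attribute filters into the fast group only when the container opted into attribute indexing at put time; everything else still falls through to slow header matching. A rough sketch of the partitioning, with a simplified filter type in place of objectSDK.SearchFilters and a stand-in for IsAtrributeIndexed:

```go
package main

import "fmt"

type filter struct{ header, value string }

// indexed stands in for IsAtrributeIndexed; the real code consults a fixed set.
func indexed(header string) bool {
	return header == "FilePath" || header == "FileName"
}

// groupFilters splits filters into index-backed (fast) and header-unmarshal (slow) groups.
func groupFilters(filters []filter, useAttributeIndex bool) (fast, slow []filter) {
	for _, f := range filters {
		if useAttributeIndex && indexed(f.header) {
			fast = append(fast, f)
		} else {
			slow = append(slow, f)
		}
	}
	return fast, slow
}

func main() {
	fs := []filter{{"FilePath", "/test/1"}, {"Color", "red"}}
	fast, slow := groupFilters(fs, true)
	fmt.Println(len(fast), len(slow)) // 1 1
}
```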
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 6469bbdbc..ce2156d2e 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -7,10 +7,10 @@ import (
"strconv"
"testing"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -26,9 +26,19 @@ import (
func TestDB_SelectUserAttributes(t *testing.T) {
t.Parallel()
+ t.Run("with_index", func(t *testing.T) {
+ testSelectUserAttributes(t, true)
+ })
+ t.Run("without_index", func(t *testing.T) {
+ testSelectUserAttributes(t, false)
+ })
+}
+
+func testSelectUserAttributes(t *testing.T, index bool) {
+ t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -36,44 +46,52 @@ func TestDB_SelectUserAttributes(t *testing.T) {
testutil.AddAttribute(raw1, "foo", "bar")
testutil.AddAttribute(raw1, "x", "y")
- err := putBig(db, raw1)
+ var putPrm meta.PutPrm
+ putPrm.SetIndexAttributes(index)
+ putPrm.SetObject(raw1)
+ _, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw2 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw2, "foo", "bar")
testutil.AddAttribute(raw2, "x", "z")
- err = putBig(db, raw2)
+ putPrm.SetObject(raw2)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw3 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw3, "a", "b")
- err = putBig(db, raw3)
+ putPrm.SetObject(raw3)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw4 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw4, "path", "test/1/2")
+ testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2")
- err = putBig(db, raw4)
+ putPrm.SetObject(raw4)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw5 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw5, "path", "test/1/3")
+ testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3")
- err = putBig(db, raw5)
+ putPrm.SetObject(raw5)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw6 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw6, "path", "test/2/3")
+ testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3")
- err = putBig(db, raw6)
+ putPrm.SetObject(raw6)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw7 := testutil.GenerateObjectWithCID(cnr)
var attr objectSDK.Attribute
- attr.SetKey("path")
- attr.SetValue("test/3/4")
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/test/3/4")
attrs := raw7.Attributes()
attrs = append(attrs, attr)
ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
@@ -81,37 +99,39 @@ func TestDB_SelectUserAttributes(t *testing.T) {
Attributes: attrs,
}, 0, 3, []byte{}, 0)
raw7.SetECHeader(ech)
- require.NoError(t, putBig(db, raw7))
+ putPrm.SetObject(raw7)
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
var raw7Parent oid.Address
raw7Parent.SetContainer(cnr)
raw7Parent.SetObject(ech.Parent())
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw1))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1))
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw2))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2))
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "b", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw3))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3))
fs = objectSDK.SearchFilters{}
fs.AddFilter("c", "d", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs)
+ testSelect2(t, db, cnr, fs, index)
fs = objectSDK.SearchFilters{}
fs.AddFilter("foo", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
@@ -121,7 +141,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw4),
@@ -131,7 +151,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -143,7 +163,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("key", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -154,8 +174,8 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
@@ -163,15 +183,15 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual)
+ testSelect2(t, db, cnr, fs, index,
raw7Parent,
)
}
@@ -180,7 +200,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -334,7 +354,7 @@ func TestDB_SelectInhume(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -352,11 +372,7 @@ func TestDB_SelectInhume(t *testing.T) {
object.AddressOf(raw2),
)
- var tombstone oid.Address
- tombstone.SetContainer(cnr)
- tombstone.SetObject(oidtest.ID())
-
- err = metaInhume(db, object.AddressOf(raw2), tombstone)
+ err = metaInhume(db, object.AddressOf(raw2), oidtest.ID())
require.NoError(t, err)
fs = objectSDK.SearchFilters{}
@@ -369,7 +385,7 @@ func TestDB_SelectPayloadHash(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -440,7 +456,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -548,7 +564,7 @@ func TestDB_SelectObjectID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -664,7 +680,7 @@ func TestDB_SelectOwnerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -766,6 +782,54 @@ func TestDB_SelectOwnerID(t *testing.T) {
})
}
+func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ ecChunk1 := oidtest.ID()
+ ecChunk2 := oidtest.ID()
+ ecParent := oidtest.ID()
+ var ecParentAddr oid.Address
+ ecParentAddr.SetContainer(cnr)
+ ecParentAddr.SetObject(ecParent)
+ var ecParentAttr []objectSDK.Attribute
+ var attr objectSDK.Attribute
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/1/2/3")
+ ecParentAttr = append(ecParentAttr, attr)
+
+ chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetID(ecChunk1)
+ chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
+ chunkObj.SetPayloadSize(uint64(5))
+ chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
+
+ chunkObj2 := testutil.GenerateObjectWithCID(cnr)
+ chunkObj2.SetID(ecChunk2)
+ chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObj2.SetPayloadSize(uint64(10))
+ chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0))
+
+ // put object with EC
+
+ var prm meta.PutPrm
+ prm.SetObject(chunkObj)
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetObject(chunkObj2)
+ _, err = db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ fs := objectSDK.SearchFilters{}
+ fs.AddRootFilter()
+ fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs, ecParentAddr)
+}
+
type testTarget struct {
objects []*objectSDK.Object
}
@@ -801,7 +865,7 @@ func TestDB_RawHead_SplitInfo(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -842,7 +906,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
t.Run("first last, then linking", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, lastPart, nil))
require.NoError(t, metaPut(db, linking, nil))
@@ -866,7 +930,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("first linking, then last", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, linking, nil))
require.NoError(t, metaPut(db, lastPart, nil))
@@ -890,7 +954,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("only last part", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, lastPart, nil))
@@ -920,7 +984,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -988,7 +1052,7 @@ func TestDB_SelectSplitID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1045,7 +1109,7 @@ func TestDB_SelectContainerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1093,7 +1157,7 @@ func TestDB_SelectContainerID(t *testing.T) {
func BenchmarkSelect(b *testing.B) {
const objCount = 1000
db := newDB(b)
- defer func() { require.NoError(b, db.Close()) }()
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
cid := cidtest.ID()
@@ -1135,23 +1199,25 @@ func TestExpiredObjects(t *testing.T) {
t.Parallel()
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
cidExp, _ := exp.ContainerID()
cidNonExp, _ := nonExp.ContainerID()
- objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{})
+ objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.Empty(t, objs) // expired object should not be returned
- objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{})
+ objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.NotEmpty(t, objs)
})
}
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) {
+ b.ReportAllocs()
+
var prm meta.SelectPrm
prm.SetContainerID(cid)
prm.SetFilters(fs)
@@ -1167,10 +1233,11 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
}
}
-func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) {
var prm meta.SelectPrm
prm.SetFilters(fs)
prm.SetContainerID(cnr)
+ prm.SetUseAttributeIndex(useAttributeIndex)
res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go
index 88446494e..72618b1a0 100644
--- a/pkg/local_object_storage/metabase/shard_id.go
+++ b/pkg/local_object_storage/metabase/shard_id.go
@@ -2,6 +2,7 @@ package meta
import (
"bytes"
+ "context"
"errors"
"fmt"
"os"
@@ -21,7 +22,7 @@ var (
// If id is missing, returns nil, nil.
//
// GetShardID does not report any metrics.
-func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) {
+func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -30,14 +31,14 @@ func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) {
return nil, nil
}
- if err := db.openDB(mode); err != nil {
- return nil, fmt.Errorf("failed to open metabase: %w", err)
+ if err := db.openDB(ctx, mode); err != nil {
+ return nil, fmt.Errorf("open metabase: %w", err)
}
id, err := db.readShardID()
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
}
return id, metaerr.Wrap(err)
@@ -59,7 +60,7 @@ func (db *DB) readShardID() ([]byte, error) {
// SetShardID sets metabase operation mode
// and writes shard id to db.
-func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
+func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -68,8 +69,8 @@ func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
return ErrReadOnlyMode
}
- if err := db.openDB(mode); err != nil {
- return fmt.Errorf("failed to open metabase: %w", err)
+ if err := db.openDB(ctx, mode); err != nil {
+ return fmt.Errorf("open metabase: %w", err)
}
err := db.writeShardID(id)
@@ -78,7 +79,7 @@ func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
}
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
}
return metaerr.Wrap(err)
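Reviewer note: both GetShardID and SetShardID keep the errors.Join idiom through the message rewording: the operation error and the close error are preserved together, so callers can still match either with errors.Is. A small stdlib-only sketch:

```go
package main

import (
	"errors"
	"fmt"
)

var errSync = errors.New("sync failed")

func readThenClose(read, closeFn func() error) error {
	err := read()
	if cErr := closeFn(); cErr != nil {
		// Join keeps both errors; errors.Is/As match each of them.
		err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
	}
	return err
}

func main() {
	err := readThenClose(
		func() error { return errors.New("read shard id") },
		func() error { return errSync },
	)
	fmt.Println(errors.Is(err, errSync)) // true
	fmt.Println(err)
}
```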
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
index 6d620b41a..8f2376503 100644
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ b/pkg/local_object_storage/metabase/storage_id.go
@@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns storage descriptor for objects from the blobstor.
// It is stored together with the object and makes get/delete operations faster.
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
+func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) {
var (
startedAt = time.Now()
success = false
@@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
+ var res StorageIDRes
if db.mode.NoMetabase() {
return res, ErrDegradedMode
}
- err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.id, err = db.storageID(tx, prm.addr)
-
- return err
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ res.id = db.storageID(tx, prm.addr)
+ return nil
})
success = err == nil
return res, metaerr.Wrap(err)
}
-func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) {
+func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte {
key := make([]byte, bucketKeySize)
smallBucket := tx.Bucket(smallBucketName(addr.Container(), key))
if smallBucket == nil {
- return nil, nil
+ return nil
}
storageID := smallBucket.Get(objectKey(addr.Object(), key))
if storageID == nil {
- return nil, nil
+ return nil
}
- return bytes.Clone(storageID), nil
+ return bytes.Clone(storageID)
}
// UpdateStorageIDPrm groups the parameters of UpdateStorageID operation.
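Reviewer note: the bytes.Clone in storageID is load-bearing: bbolt's Get returns a slice aliasing the memory-mapped page, which is only valid until the View transaction ends. A runnable sketch against a throwaway database (file and bucket names illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("demo.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove("demo.db")
	defer db.Close()

	_ = db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("small"))
		if err != nil {
			return err
		}
		return b.Put([]byte("obj"), []byte("storage-id"))
	})

	var id []byte
	_ = db.View(func(tx *bbolt.Tx) error {
		if b := tx.Bucket([]byte("small")); b != nil {
			// Get returns transaction-owned memory; clone before the
			// transaction closes or the slice may be invalidated.
			id = bytes.Clone(b.Get([]byte("obj")))
		}
		return nil
	})
	fmt.Printf("%s\n", id) // storage-id
}
```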
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index aaf6480ab..fef680159 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -15,7 +15,7 @@ func TestDB_StorageID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw1 := testutil.GenerateObject()
raw2 := testutil.GenerateObject()
@@ -43,7 +43,7 @@ func TestDB_StorageID(t *testing.T) {
cnrID, ok := deleted.ContainerID()
require.True(t, ok)
ts := testutil.GenerateObjectWithCID(cnrID)
- require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts)))
+ require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object()))
// check StorageID for object without storageID
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
@@ -79,7 +79,7 @@ func TestPutWritecacheDataRace(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
putStorageID := []byte{1, 2, 3}
wcStorageID := []byte{1, 2, 3, 4, 5}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index f677dcf8e..4948f3424 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
+ "encoding/hex"
"errors"
"fmt"
"os"
@@ -11,7 +12,8 @@ import (
"sync/atomic"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -25,15 +27,15 @@ const (
upgradeTimeout = 1 * time.Second
)
-var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
+var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{
2: upgradeFromV2ToV3,
- 3: func(_ context.Context, _ *bbolt.DB, log func(a ...any)) error {
+ 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error {
log("metabase already upgraded")
return nil
},
}
-func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
+func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error {
if _, err := os.Stat(path); err != nil {
return fmt.Errorf("check metabase existence: %w", err)
}
@@ -61,7 +63,7 @@ func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any))
}); err != nil {
return fmt.Errorf("set upgrade key %w", err)
}
- if err := updater(ctx, db, log); err != nil {
+ if err := updater(ctx, db, cs, log); err != nil {
return fmt.Errorf("update metabase schema: %w", err)
}
if err := db.Update(func(tx *bbolt.Tx) error {
@@ -93,7 +95,7 @@ func compactDB(db *bbolt.DB) error {
NoSync: true,
})
if err != nil {
- return fmt.Errorf("can't open new metabase to compact: %w", err)
+ return fmt.Errorf("open new metabase to compact: %w", err)
}
if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
@@ -113,17 +115,21 @@ func compactDB(db *bbolt.DB) error {
return nil
}
-func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
return err
}
- if err := dropUserAttributes(ctx, db, log); err != nil {
- return err
- }
- if err := dropOwnerIDIndex(ctx, db, log); err != nil {
- return err
- }
- if err := dropPayloadChecksumIndex(ctx, db, log); err != nil {
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return dropUserAttributes(ctx, db, cs, log)
+ })
+ eg.Go(func() error {
+ return dropOwnerIDIndex(ctx, db, log)
+ })
+ eg.Go(func() error {
+ return dropPayloadChecksumIndex(ctx, db, log)
+ })
+ if err := eg.Wait(); err != nil {
return err
}
return db.Update(func(tx *bbolt.Tx) error {
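Reviewer note: running the three index drops through errgroup.WithContext means they proceed concurrently, the first failure cancels the shared context for the rest, and eg.Wait returns that first error. A compact sketch of the pattern (task names illustrative; relies on Go 1.22+ per-iteration loop variables, which matches this repo's toolchain):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func drop(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // another task already failed
	default:
		fmt.Println("dropped", name)
		return nil
	}
}

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	for _, name := range []string{"user attributes", "owner ID index", "payload checksum index"} {
		eg.Go(func() error {
			return drop(ctx, name)
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("upgrade failed:", err)
	}
}
```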
@@ -252,7 +258,7 @@ func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, i
continue
}
attributeKey := string(attrKey[1+cidSize:])
- if attributeKey != objectV2.SysAttributeExpEpochNeoFS && attributeKey != objectV2.SysAttributeExpEpoch {
+ if attributeKey != objectV2.SysAttributeExpEpoch {
continue
}
var containerID cid.ID
@@ -286,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
}
expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
if err != nil {
- return fmt.Errorf("could not parse expiration epoch: %w", err)
+ return fmt.Errorf("parse expiration epoch: %w", err)
}
expirationEpochBucket := b.Bucket(attrValue)
attrKeyValueC := expirationEpochBucket.Cursor()
@@ -323,10 +329,223 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
return nil
}
-func dropUserAttributes(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
- return dropBucketsByPrefix(ctx, db, []byte{userAttributePrefix}, func(a ...any) {
- log(append([]any{"user attributes:"}, a...)...)
- })
+func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
+ log("deleting user attribute buckets...")
+ const batch = 1000
+ prefix := []byte{userAttributePrefix}
+ last := prefix
+ var count uint64
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ var keys [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
+ if bytes.Equal(last, k) {
+ continue
+ }
+ keys = append(keys, bytes.Clone(k))
+ }
+ return nil
+ }); err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ if len(keys) == 0 {
+ log("deleting user attribute buckets completed successfully, deleted", count, "buckets")
+ return nil
+ }
+ last = keys[len(keys)-1]
+ cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys)
+ if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ count += cnt
+ cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys)
+ if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ count += cnt
+ log("deleted", count, "user attribute buckets")
+ }
+}
+
+func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
+ keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs)
+ if err != nil {
+ return 0, fmt.Errorf("select non indexed user attributes: %w", err)
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, k := range keysToDrop {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop non indexed user attributes: %w", err)
+ }
+ return uint64(len(keysToDrop)), nil
+}
+
+func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
+ var keysToDrop [][]byte
+ for _, key := range keys {
+ attr, ok := attributeFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ if !IsAtrributeIndexed(attr) {
+ keysToDrop = append(keysToDrop, key)
+ continue
+ }
+ contID, ok := cidFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ info, err := cs.Info(ctx, contID)
+ if err != nil {
+ return nil, err
+ }
+ if info.Removed || !info.Indexed {
+ keysToDrop = append(keysToDrop, key)
+ }
+ }
+ return keysToDrop, nil
+}
+
+func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) {
+ var dropBuckets [][]byte
+ for _, key := range keys {
+ select {
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ default:
+ }
+
+ if err := dropEmptyNestedBuckets(ctx, db, key); err != nil {
+ return 0, err
+ }
+
+ empty, exists, err := bucketIsEmpty(db, key)
+ if err != nil {
+ return 0, err
+ }
+ if empty && exists {
+ dropBuckets = append(dropBuckets, key)
+ }
+ }
+ if len(dropBuckets) == 0 {
+ return 0, nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, key := range dropBuckets {
+ if err := tx.DeleteBucket(key); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop empty user attributes buckets: %w", err)
+ }
+ return uint64(len(dropBuckets)), nil
+}
+
+func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) {
+ var empty bool
+ var exists bool
+ if err := db.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(bucketKey)
+ if b == nil {
+ return nil
+ }
+ exists = true
+ empty = !hasAnyItem(b)
+ return nil
+ }); err != nil {
+ return false, false, fmt.Errorf("bucket empty check: %w", err)
+ }
+ return empty, exists, nil
+}
+
+func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error {
+ var last []byte
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var dropBuckets [][]byte
+ var err error
+ dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last)
+ if err != nil {
+ return fmt.Errorf("select empty nested buckets: %w", err)
+ }
+ if len(dropBuckets) == 0 {
+ return nil
+ }
+
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ for _, sb := range dropBuckets {
+ if err := rootBucket.DeleteBucket(sb); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("drop empty nested buckets: %w", err)
+ }
+ }
+}
+
+func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) {
+ const batchSize = 1000
+ var result [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ c := rootBucket.Cursor()
+ for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if bytes.Equal(last, k) {
+ continue
+ }
+ last = bytes.Clone(k)
+ if v != nil { // record
+ continue
+ }
+ nestedBucket := rootBucket.Bucket(k)
+ if nestedBucket == nil {
+ continue
+ }
+ if !hasAnyItem(nestedBucket) {
+ result = append(result, bytes.Clone(k))
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, nil, err
+ }
+ return result, last, nil
}
func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
@@ -366,7 +585,7 @@ func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log f
log("deleting buckets completed successfully, deleted", count, "buckets")
return nil
}
- if err := db.Update(func(tx *bbolt.Tx) error {
+ if err := db.Batch(func(tx *bbolt.Tx) error {
for _, k := range keys {
if err := tx.DeleteBucket(k); err != nil {
return err
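Reviewer note: the upgrade now deletes user-attribute buckets in bounded batches instead of one long Update transaction: a read-only pass collects at most 1000 keys starting from the last key seen, then a db.Batch pass deletes them. The collection half, distilled from dropUserAttributes above into a standalone helper:

```go
package meta

import (
	"bytes"

	"go.etcd.io/bbolt"
)

// collectBatch gathers up to limit top-level bucket names carrying prefix,
// resuming strictly after last (the final key of the previous batch).
func collectBatch(db *bbolt.DB, prefix, last []byte, limit int) ([][]byte, error) {
	var keys [][]byte
	err := db.View(func(tx *bbolt.Tx) error {
		c := tx.Cursor()
		for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < limit; k, _ = c.Next() {
			if bytes.Equal(last, k) {
				continue // Seek lands on last itself; it was handled already.
			}
			keys = append(keys, bytes.Clone(k))
		}
		return nil
	})
	return keys, err
}
```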
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index 3797de0a4..c90de4dd6 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -11,11 +11,12 @@ import (
"testing"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -33,15 +34,21 @@ func TestUpgradeV2ToV3(t *testing.T) {
}()
db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
- require.NoError(t, db.Close())
- require.NoError(t, Upgrade(context.Background(), path, true, t.Log))
+ require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion)
+ require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
fmt.Println()
}
+type testContainerInfoProvider struct{}
+
+func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) {
+ return container.Info{}, nil
+}
+
func createTempCopy(t *testing.T, path string) string {
src, err := os.Open(path)
require.NoError(t, err)
@@ -80,7 +87,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
db.boltDB.AllocSize = allocSize
db.boltDB.NoSync = true
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
containers := make([]cid.ID, containersCount)
for i := range containers {
containers[i] = cidtest.ID()
@@ -95,7 +102,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
@@ -106,7 +113,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects generated")
+ db.log.Info(ctx, "simple objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// complex objects
@@ -118,8 +125,8 @@ func TestGenerateMetabaseFile(t *testing.T) {
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
- testutil.AddAttribute(child, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
- testutil.AddAttribute(parent, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
@@ -130,7 +137,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("complex objects generated")
+ db.log.Info(ctx, "complex objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects deleted by gc marks
@@ -138,7 +145,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -152,7 +159,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects deleted by gc marks generated")
+ db.log.Info(ctx, "simple objects deleted by gc marks generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(10000)
// simple objects deleted by tombstones
@@ -160,7 +167,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -182,7 +189,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects deleted by tombstones generated")
+ db.log.Info(ctx, "simple objects deleted by tombstones generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects locked by locks
@@ -190,7 +197,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -209,7 +216,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info("simple objects locked by locks generated")
+ db.log.Info(ctx, "simple objects locked by locks generated")
require.NoError(t, db.boltDB.Sync())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index eef7210dc..4ad83332b 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -99,7 +100,6 @@ const (
// userAttributePrefix is used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
- // removed in version 3
userAttributePrefix
// ====================
@@ -170,6 +170,28 @@ func smallBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, smallPrefix, key)
}
+// attributeBucketName returns _<attributeKey>.
+func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
+ key[0] = userAttributePrefix
+ cnr.Encode(key[1:])
+ return append(key[:bucketKeySize], attributeKey...)
+}
+
+func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return cid.ID{}, false
+ }
+ var result cid.ID
+ return result, result.Decode(bucketName[1:bucketKeySize]) == nil
+}
+
+func attributeFromAttributeBucket(bucketName []byte) (string, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return "", false
+ }
+ return string(bucketName[bucketKeySize:]), true
+}
+
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
@@ -210,11 +232,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
epoch := binary.BigEndian.Uint64(key)
var cnr cid.ID
if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
}
var obj oid.ID
if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
}
return epoch, cnr, obj, nil
}
@@ -257,9 +279,7 @@ func objectKey(obj oid.ID, key []byte) []byte {
//
// firstIrregularObjectType(tx, cnr, obj) usage allows getting object type.
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type {
- if len(objs) == 0 {
- panic("empty object list in firstIrregularObjectType")
- }
+ assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType")
var keys [2][1 + cidSize]byte
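Reviewer note: the new attributeBucketName/cidFromAttributeBucket/attributeFromAttributeBucket helpers agree on one layout: a prefix byte, a fixed-size container ID, then the raw attribute key. A standalone round-trip sketch (the prefix value and the 32-byte CID size here are illustrative; the real ones come from the metabase constants and the SDK encoder):

```go
package main

import (
	"bytes"
	"fmt"
)

const (
	prefix  = 0x06 // stand-in for userAttributePrefix
	cidSize = 32
)

// bucketName lays out [prefix | 32-byte container ID | attribute key].
func bucketName(cnr [cidSize]byte, attr string) []byte {
	key := make([]byte, 1+cidSize, 1+cidSize+len(attr))
	key[0] = prefix
	copy(key[1:], cnr[:])
	return append(key, attr...)
}

// splitBucketName reverses the layout; ok is false for foreign prefixes.
func splitBucketName(name []byte) (cnr [cidSize]byte, attr string, ok bool) {
	if len(name) < 1+cidSize || name[0] != prefix {
		return cnr, "", false
	}
	copy(cnr[:], name[1:1+cidSize])
	return cnr, string(name[1+cidSize:]), true
}

func main() {
	var cnr [cidSize]byte
	copy(cnr[:], bytes.Repeat([]byte{0xAB}, cidSize))
	name := bucketName(cnr, "FilePath")
	_, attr, ok := splitBucketName(name)
	fmt.Println(attr, ok) // FilePath true
}
```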
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index 048bb9af6..fbc0f1ad9 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
- return fmt.Errorf("can't create auxiliary bucket: %w", err)
+ return fmt.Errorf("create auxiliary bucket: %w", err)
}
return b.Put(versionKey, data)
}
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index 75229a1b4..b373fb32e 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -45,25 +45,25 @@ func TestVersion(t *testing.T) {
t.Run("simple", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
t.Run("reopen", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
t.Run("old data", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.SetShardID([]byte{1, 2, 3, 4}, mode.ReadWrite))
+ require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
t.Run("invalid version", func(t *testing.T) {
db := newDB(t)
@@ -71,37 +71,37 @@ func TestVersion(t *testing.T) {
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return updateVersion(tx, version+1)
}))
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.Error(t, db.Init())
- require.NoError(t, db.Close())
+ require.Error(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
t.Run("reset", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Reset())
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
t.Run("incompleted upgrade", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
}))
- require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade)
- require.NoError(t, db.Close())
+ require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
}))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
})
}
diff --git a/pkg/local_object_storage/pilorama/batch.go b/pkg/local_object_storage/pilorama/batch.go
index 520c6dfb4..4c5238921 100644
--- a/pkg/local_object_storage/pilorama/batch.go
+++ b/pkg/local_object_storage/pilorama/batch.go
@@ -1,9 +1,9 @@
package pilorama
import (
+ "cmp"
"encoding/binary"
"slices"
- "sort"
"sync"
"time"
@@ -48,8 +48,8 @@ func (b *batch) run() {
// Sorting without a mutex is ok, because we append to this slice only if timer is non-nil.
// See (*boltForest).addBatch for details.
- sort.Slice(b.operations, func(i, j int) bool {
- return b.operations[i].Time < b.operations[j].Time
+ slices.SortFunc(b.operations, func(mi, mj *Move) int {
+ return cmp.Compare(mi.Time, mj.Time)
})
b.operations = slices.CompactFunc(b.operations, func(x, y *Move) bool { return x.Time == y.Time })
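Reviewer note: batch.run now sorts with slices.SortFunc plus a cmp.Compare three-way comparator instead of sort.Slice, then deduplicates equal timestamps with slices.CompactFunc. The same pipeline in isolation:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type move struct{ time uint64 }

func main() {
	ops := []*move{{3}, {1}, {3}, {2}}

	// sort.Slice replacement: SortFunc takes a three-way comparator.
	slices.SortFunc(ops, func(a, b *move) int {
		return cmp.Compare(a.time, b.time)
	})

	// Drop adjacent duplicates with equal timestamps, as batch.run does.
	ops = slices.CompactFunc(ops, func(x, y *move) bool { return x.time == y.time })

	for _, op := range ops {
		fmt.Print(op.time, " ") // 1 2 3
	}
	fmt.Println()
}
```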
diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go
index 22b951a41..3156751f2 100644
--- a/pkg/local_object_storage/pilorama/bench_test.go
+++ b/pkg/local_object_storage/pilorama/bench_test.go
@@ -28,8 +28,8 @@ func BenchmarkCreate(b *testing.B) {
WithPath(filepath.Join(tmpDir, "test.db")),
WithMaxBatchSize(runtime.GOMAXPROCS(0)))
require.NoError(b, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(b, f.Init())
- defer func() { require.NoError(b, f.Close()) }()
+ require.NoError(b, f.Init(context.Background()))
+ defer func() { require.NoError(b, f.Close(context.Background())) }()
b.Cleanup(func() {
require.NoError(b, os.RemoveAll(tmpDir))
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index e2d69cafa..897b37ea0 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -10,7 +10,6 @@ import (
"os"
"path/filepath"
"slices"
- "sort"
"strconv"
"sync"
"time"
@@ -92,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage {
return &b
}
-func (t *boltForest) SetMode(m mode.Mode) error {
+func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
t.modeMtx.Lock()
defer t.modeMtx.Unlock()
@@ -100,14 +99,14 @@ func (t *boltForest) SetMode(m mode.Mode) error {
return nil
}
- err := t.Close()
+ err := t.Close(ctx)
if err == nil && !m.NoMetabase() {
if err = t.openBolt(m); err == nil {
- err = t.Init()
+ err = t.Init(ctx)
}
}
if err != nil {
- return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+ return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
}
t.mode = m
@@ -129,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
readOnly := m.ReadOnly()
err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
+ return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
}
opts := *bbolt.DefaultOptions
@@ -140,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
t.db, err = bbolt.Open(t.path, t.perm, &opts)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
+ return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
}
t.db.MaxBatchSize = t.maxBatchSize
@@ -149,7 +148,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
return nil
}
-func (t *boltForest) Init() error {
+func (t *boltForest) Init(context.Context) error {
if t.mode.NoMetabase() || t.db.IsReadOnly() {
return nil
}
@@ -163,7 +162,7 @@ func (t *boltForest) Init() error {
})
}
-func (t *boltForest) Close() error {
+func (t *boltForest) Close(context.Context) error {
var err error
if t.db != nil {
err = t.db.Close()
@@ -420,10 +419,7 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri
return err
}
- i, node, err := t.getPathPrefix(bTree, attr, path)
- if err != nil {
- return err
- }
+ i, node := t.getPathPrefix(bTree, attr, path)
ts := t.getLatestTimestamp(bLog, d.Position, d.Size)
lm = make([]Move, len(path)-i+1)
@@ -559,6 +555,80 @@ func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string
return metaerr.Wrap(err)
}
+func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ m, err := t.filterSeen(cnr, treeID, m)
+ if err != nil {
+ return err
+ }
+ if len(m) == 0 {
+ success = true
+ return nil
+ }
+
+ ch := make(chan error)
+ b := &batch{
+ forest: t,
+ cid: cnr,
+ treeID: treeID,
+ results: []chan<- error{ch},
+ operations: m,
+ }
+ go func() {
+ b.run()
+ }()
+ err = <-ch
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) {
+ t.modeMtx.RLock()
+ defer t.modeMtx.RUnlock()
+
+ if t.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ ops := make([]*Move, 0, len(m))
+ err := t.db.View(func(tx *bbolt.Tx) error {
+ treeRoot := tx.Bucket(bucketName(cnr, treeID))
+ if treeRoot == nil {
+ ops = m
+ return nil
+ }
+ b := treeRoot.Bucket(logBucket)
+ for _, op := range m {
+ var logKey [8]byte
+ binary.BigEndian.PutUint64(logKey[:], op.Time)
+ seen := b.Get(logKey[:]) != nil
+ if !seen {
+ ops = append(ops, op)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, metaerr.Wrap(err)
+ }
+ return ops, nil
+}
+
// TreeApplyStream should be used with caution: this method locks other write transactions until `source` is closed.
func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error {
var (
@@ -705,7 +775,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
key, value = c.Prev()
}
- for i := range len(ms) {
+ for i := range ms {
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
// 2. Insert the operation.
@@ -907,10 +977,7 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st
b := treeRoot.Bucket(dataBucket)
- i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
- if err != nil {
- return err
- }
+ i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
if i < len(path)-1 {
return nil
}
@@ -1010,7 +1077,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol
}
// TreeSortedByFilename implements the Forest interface.
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) {
+func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
var (
startedAt = time.Now()
success = false
@@ -1088,19 +1155,24 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
}
if len(res) != 0 {
s := string(findAttr(res[len(res)-1].Meta, AttributeFilename))
- last = &s
+ last = NewCursor(s, res[len(res)-1].LastChild())
}
return res, last, metaerr.Wrap(err)
}
-func sortAndCut(result []NodeInfo, last *string) []NodeInfo {
+func sortByFilename(nodes []NodeInfo) {
+ slices.SortFunc(nodes, func(a, b NodeInfo) int {
+ return bytes.Compare(a.Meta.GetAttr(AttributeFilename), b.Meta.GetAttr(AttributeFilename))
+ })
+}
+
+func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo {
var lastBytes []byte
if last != nil {
- lastBytes = []byte(*last)
+ lastBytes = []byte(last.GetFilename())
}
- sort.Slice(result, func(i, j int) bool {
- return bytes.Compare(result[i].Meta.GetAttr(AttributeFilename), result[j].Meta.GetAttr(AttributeFilename)) == -1
- })
+ sortByFilename(result)
+
for i := range result {
if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
return result[i:]
@@ -1162,7 +1234,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f
nodes = nil
length = actualLength + 1
count = 0
- c.Seek(append(prefix, byte(length), byte(length>>8)))
+ c.Seek(binary.LittleEndian.AppendUint16(prefix, length))
c.Prev() // c.Next() will be performed by for loop
}
}
@@ -1282,7 +1354,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
return nil
})
if err != nil {
- return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
+ return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
}
success = true
return ids, nil
@@ -1426,7 +1498,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
var contID cidSDK.ID
if err := contID.Decode(k[:32]); err != nil {
- return fmt.Errorf("failed to decode containerID: %w", err)
+ return fmt.Errorf("decode container ID: %w", err)
}
res.Items = append(res.Items, ContainerIDTreeID{
CID: contID,
@@ -1434,8 +1506,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
})
if len(res.Items) == batchSize {
- res.NextPageToken = make([]byte, len(k))
- copy(res.NextPageToken, k)
+ res.NextPageToken = bytes.Clone(k)
break
}
}
@@ -1448,7 +1519,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
return &res, nil
}
-func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) {
+func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) {
c := bTree.Cursor()
var curNodes []Node
@@ -1471,14 +1542,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin
}
if len(nextNodes) == 0 {
- return i, curNodes, nil
+ return i, curNodes
}
}
- return len(path), nextNodes, nil
+ return len(path), nextNodes
}
-func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) {
+func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) {
c := bTree.Cursor()
var curNode Node
@@ -1498,10 +1569,10 @@ loop:
childKey, value = c.Next()
}
- return i, curNode, nil
+ return i, curNode
}
- return len(path), curNode, nil
+ return len(path), curNode
}
func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
@@ -1511,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
lm.Child = binary.LittleEndian.Uint64(data)
lm.Parent = binary.LittleEndian.Uint64(data[8:])
- return lm.Meta.FromBytes(data[16:])
+ return lm.FromBytes(data[16:])
}
func (t *boltForest) logToBytes(lm *Move) []byte {
w := io.NewBufBinWriter()
- size := 8 + 8 + lm.Meta.Size() + 1
+ size := 8 + 8 + lm.Size() + 1
// if lm.HasOld {
// size += 8 + lm.Old.Meta.Size()
// }
@@ -1524,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
w.Grow(size)
w.WriteU64LE(lm.Child)
w.WriteU64LE(lm.Parent)
- lm.Meta.EncodeBinary(w.BinWriter)
+ lm.EncodeBinary(w.BinWriter)
// w.WriteBool(lm.HasOld)
// if lm.HasOld {
// w.WriteU64LE(lm.Old.Parent)
@@ -1586,7 +1657,7 @@ func internalKeyPrefix(key []byte, k string) []byte {
key = append(key, 'i')
l := len(k)
- key = append(key, byte(l), byte(l>>8))
+ key = binary.LittleEndian.AppendUint16(key, uint16(l))
key = append(key, k...)
return key
}
@@ -1601,14 +1672,10 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte {
key = internalKeyPrefix(key, k)
l := len(v)
- key = append(key, byte(l), byte(l>>8))
+ key = binary.LittleEndian.AppendUint16(key, uint16(l))
key = append(key, v...)
- var raw [8]byte
- binary.LittleEndian.PutUint64(raw[:], parent)
- key = append(key, raw[:]...)
-
- binary.LittleEndian.PutUint64(raw[:], node)
- key = append(key, raw[:]...)
+ key = binary.LittleEndian.AppendUint64(key, parent)
+ key = binary.LittleEndian.AppendUint64(key, node)
return key
}
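Reviewer note: internalKey and internalKeyPrefix now grow the key with binary.LittleEndian.AppendUint16/AppendUint64 rather than hand-rolled byte shifts and temporary arrays. A sketch of the resulting key layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// internalKey builds ['i' | len(k) | k | len(v) | v | parent | node],
// with lengths as little-endian uint16 and node IDs as uint64.
func internalKey(k, v string, parent, node uint64) []byte {
	key := make([]byte, 0, 1+2+len(k)+2+len(v)+8+8)
	key = append(key, 'i')
	key = binary.LittleEndian.AppendUint16(key, uint16(len(k)))
	key = append(key, k...)
	key = binary.LittleEndian.AppendUint16(key, uint16(len(v)))
	key = append(key, v...)
	key = binary.LittleEndian.AppendUint64(key, parent)
	key = binary.LittleEndian.AppendUint64(key, node)
	return key
}

func main() {
	key := internalKey("FileName", "a.txt", 1, 42)
	fmt.Println(len(key)) // 1 + 2 + 8 + 2 + 5 + 8 + 8 = 34
}
```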
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index 78503bada..ebfd0bcc0 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -1,10 +1,10 @@
package pilorama
import (
- "bytes"
"context"
"errors"
"fmt"
+ "slices"
"sort"
"strings"
@@ -85,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
s.operations = append(s.operations, op)
}
- mCopy := make([]KeyValue, len(m))
- copy(mCopy, m)
+ mCopy := slices.Clone(m)
op := s.do(&Move{
Parent: node,
Meta: Meta{
@@ -112,7 +111,16 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o
return s.Apply(op)
}
-func (f *memoryForest) Init() error {
+func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error {
+ for _, op := range ops {
+ if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (f *memoryForest) Init(context.Context) error {
return nil
}
@@ -120,11 +128,11 @@ func (f *memoryForest) Open(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) SetMode(mode.Mode) error {
+func (f *memoryForest) SetMode(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) Close() error {
+func (f *memoryForest) Close(context.Context) error {
return nil
}
func (f *memoryForest) SetParentID(string) {}
@@ -156,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
}
// TreeSortedByFilename implements the Forest interface.
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) {
+func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -169,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
var res []NodeInfo
for _, nodeID := range nodeIDs {
- children := s.tree.getChildren(nodeID)
+ children := s.getChildren(nodeID)
for _, childID := range children {
var found bool
for _, kv := range s.infoMap[childID].Meta.Items {
@@ -192,23 +200,18 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
return nil, start, nil
}
- sort.Slice(res, func(i, j int) bool {
- return bytes.Compare(res[i].Meta.GetAttr(AttributeFilename), res[j].Meta.GetAttr(AttributeFilename)) == -1
- })
+ sortByFilename(res)
r := mergeNodeInfos(res)
for i := range r {
- if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start {
- finish := i + count
- if len(res) < finish {
- finish = len(res)
- }
+ if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() {
+ finish := min(len(res), i+count)
last := string(findAttr(r[finish-1].Meta, AttributeFilename))
- return r[i:finish], &last, nil
+ return r[i:finish], NewCursor(last, 0), nil
}
}
last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
- return nil, &last, nil
+ return nil, NewCursor(last, 0), nil
}
// TreeGetChildren implements the Forest interface.
@@ -219,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
return nil, ErrTreeNotFound
}
- children := s.tree.getChildren(nodeID)
+ children := s.getChildren(nodeID)
res := make([]NodeInfo, 0, len(children))
for _, childID := range children {
res = append(res, NodeInfo{
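`bytes.Clone` (used earlier for the page token) and `slices.Clone` (used here for the meta items) are one-call replacements for the allocate-and-copy idiom. A standalone sketch using only the standard library; the one behavioral difference is that `Clone` of a nil slice returns nil rather than an empty non-nil slice, which neither call site appears to rely on:

```go
package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	token := []byte("next-page-token")

	// Removed idiom: explicit allocation followed by copy.
	manual := make([]byte, len(token))
	copy(manual, token)

	// Added idiom: same bytes, new backing array, no aliasing with token.
	cloned := bytes.Clone(token)

	// slices.Clone does the same for any slice element type.
	kvs := slices.Clone([]string{"k1=v1", "k2=v2"})

	fmt.Println(bytes.Equal(manual, cloned), kvs) // true [k1=v1 k2=v2]
}
```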
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 854fe0aad..844084c55 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -30,7 +30,7 @@ var providers = []struct {
{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
f := NewMemoryForest()
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
return f
}},
{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
@@ -40,7 +40,7 @@ var providers = []struct {
WithMaxBatchSize(1),
}, opts...)...)
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
return f
}},
}
@@ -61,7 +61,7 @@ func TestForest_TreeMove(t *testing.T) {
}
func testForestTreeMove(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -125,7 +125,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
}
func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -247,7 +247,7 @@ func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
}
func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *string, count int) *string {
+ treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -302,7 +302,7 @@ func TestForest_TreeSortedIteration(t *testing.T) {
}
func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *string, count int) *string {
+ treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -361,7 +361,7 @@ func TestForest_TreeSortedFilename(t *testing.T) {
}
func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
const controlAttr = "control_attr"
cid := cidtest.ID()
@@ -453,7 +453,7 @@ func TestForest_TreeDrop(t *testing.T) {
}
func testForestTreeDrop(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
const cidsSize = 3
var cids [cidsSize]cidSDK.ID
@@ -523,7 +523,7 @@ func TestForest_TreeAdd(t *testing.T) {
}
func testForestTreeAdd(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -571,7 +571,7 @@ func TestForest_TreeAddByPath(t *testing.T) {
}
func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -709,7 +709,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
t.Run("add a child, then insert a parent removal", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
@@ -722,7 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
testApply(t, s, 11, 10, meta)
@@ -792,7 +792,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
t.Run("expected", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range logs {
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
@@ -801,7 +801,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
})
s := constructor(t, WithMaxBatchSize(batchSize))
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
for range batchSize {
@@ -842,7 +842,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
}
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
t.Run("empty log, no panic", func(t *testing.T) {
_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
@@ -883,7 +883,7 @@ func TestForest_TreeExists(t *testing.T) {
func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
actual, err := s.TreeExists(context.Background(), cid, treeID)
@@ -942,7 +942,7 @@ func TestApplyTricky1(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1005,7 +1005,7 @@ func TestApplyTricky2(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1081,7 +1081,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
}
func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
- for i := uint64(0); i < uint64(nodeCount); i++ {
+ for i := range uint64(nodeCount) {
expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
@@ -1115,7 +1115,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close()) }()
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1145,7 +1145,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
wg.Wait()
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close())
+ require.NoError(t, actual.Close(context.Background()))
}
}
@@ -1163,7 +1163,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close()) }()
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1179,7 +1179,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close())
+ require.NoError(t, actual.Close(context.Background()))
}
}
@@ -1197,7 +1197,7 @@ func BenchmarkApplySequential(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close()) }()
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1233,7 +1233,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close()) }()
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1290,7 +1290,7 @@ func TestTreeGetByPath(t *testing.T) {
}
func testTreeGetByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
treeID := "version"
@@ -1369,7 +1369,7 @@ func TestGetTrees(t *testing.T) {
}
func testTreeGetTrees(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
d := CIDDescriptor{Position: 0, Size: 1}
@@ -1415,7 +1415,7 @@ func TestTreeLastSyncHeight(t *testing.T) {
}
func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
- defer func() { require.NoError(t, f.Close()) }()
+ defer func() { require.NoError(t, f.Close(context.Background())) }()
cnr := cidtest.ID()
treeID := "someTree"
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
index 5a00bcf7a..b035be1e1 100644
--- a/pkg/local_object_storage/pilorama/heap.go
+++ b/pkg/local_object_storage/pilorama/heap.go
@@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any {
// fixedHeap maintains a fixed number of the smallest elements that follow a given starting point.
type fixedHeap struct {
- start *string
+ start *Cursor
sorted bool
count int
h *filenameHeap
}
-func newHeap(start *string, count int) *fixedHeap {
+func newHeap(start *Cursor, count int) *fixedHeap {
h := new(filenameHeap)
heap.Init(h)
@@ -50,8 +50,19 @@ func newHeap(start *string, count int) *fixedHeap {
const amortizationMultiplier = 5
func (h *fixedHeap) push(id MultiNode, filename string) bool {
- if h.start != nil && filename <= *h.start {
- return false
+ if h.start != nil {
+ if filename < h.start.GetFilename() {
+ return false
+ } else if filename == h.start.GetFilename() {
+ // A tree may have many nodes with the same filename but different versions, so that
+ // len(nodes) > batch_size. The nodes cut off by the batch limit must be pushed into
+ // the result on a repeated call with the same filename.
+ pos := slices.Index(id, h.start.GetNode())
+ if pos == -1 || pos+1 >= len(id) {
+ return false
+ }
+ id = id[pos+1:]
+ }
}
*h.h = append(*h.h, heapInfo{id: id, filename: filename})
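This trimming is what lets cursor-based pagination resume inside a run of equal filenames: entries strictly below the cursor's filename are skipped, and within the cursor's own filename only the node IDs after the cursor's node survive. A self-contained sketch of the same rule (the names here are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"slices"
)

type Node = uint64

// resume applies the push rule above to one (id, filename) candidate,
// given the cursor state (curFilename, curNode) from the previous page.
func resume(id []Node, filename, curFilename string, curNode Node) ([]Node, bool) {
	if filename < curFilename {
		return nil, false // already emitted on an earlier page
	}
	if filename == curFilename {
		pos := slices.Index(id, curNode)
		if pos == -1 || pos+1 >= len(id) {
			return nil, false // the whole run of versions was already emitted
		}
		id = id[pos+1:] // keep only the versions after the cursor's node
	}
	return id, true
}

func main() {
	// The previous page stopped at node 7 inside the "a.txt" run.
	fmt.Println(resume([]Node{5, 7, 9, 11}, "a.txt", "a.txt", 7)) // [9 11] true
	fmt.Println(resume([]Node{5, 7}, "a.txt", "a.txt", 7))        // [] false
	fmt.Println(resume([]Node{3}, "b.txt", "a.txt", 7))           // [3] true
}
```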
diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go
index c9f5df3b7..28b7faec8 100644
--- a/pkg/local_object_storage/pilorama/inmemory.go
+++ b/pkg/local_object_storage/pilorama/inmemory.go
@@ -1,6 +1,9 @@
package pilorama
-import "sort"
+import (
+ "cmp"
+ "slices"
+)
// nodeInfo couples parent and metadata.
type nodeInfo struct {
@@ -32,9 +35,9 @@ func newMemoryTree() *memoryTree {
// undo un-does op and changes s in-place.
func (s *memoryTree) undo(op *move) {
if op.HasOld {
- s.tree.infoMap[op.Child] = op.Old
+ s.infoMap[op.Child] = op.Old
} else {
- delete(s.tree.infoMap, op.Child)
+ delete(s.infoMap, op.Child)
}
}
@@ -80,8 +83,8 @@ func (s *memoryTree) do(op *Move) move {
},
}
- shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
- p, ok := s.tree.infoMap[op.Child]
+ shouldPut := !s.isAncestor(op.Child, op.Parent)
+ p, ok := s.infoMap[op.Child]
if ok {
lm.HasOld = true
lm.Old = p
@@ -97,7 +100,7 @@ func (s *memoryTree) do(op *Move) move {
p.Meta = m
p.Parent = op.Parent
- s.tree.infoMap[op.Child] = p
+ s.infoMap[op.Child] = p
return lm
}
@@ -131,10 +134,10 @@ func (t tree) getChildren(parent Node) []Node {
}
}
- sort.Slice(children, func(i, j int) bool {
- a := t.infoMap[children[i]]
- b := t.infoMap[children[j]]
- return a.Meta.Time < b.Meta.Time
+ slices.SortFunc(children, func(ci, cj uint64) int {
+ a := t.infoMap[ci]
+ b := t.infoMap[cj]
+ return cmp.Compare(a.Meta.Time, b.Meta.Time)
})
return children
}
@@ -189,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
}
var nodes []Node
- var lastTs Timestamp
+ var lastTS Timestamp
children := t.getChildren(curNode)
for i := range children {
@@ -197,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
fileName := string(info.Meta.GetAttr(attr))
if fileName == path[len(path)-1] {
if latest {
- if info.Meta.Time >= lastTs {
+ if info.Meta.Time >= lastTS {
nodes = append(nodes[:0], children[i])
}
} else {
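The `sort.Slice` call above was replaced with the generic `slices.SortFunc`, whose comparator returns an int (`cmp.Compare`) instead of a bool; the ordering by `Meta.Time` is unchanged. A standalone sketch:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type nodeInfo struct {
	id   uint64
	time uint64 // logical timestamp, as in Meta.Time
}

func main() {
	children := []nodeInfo{{id: 3, time: 20}, {id: 1, time: 5}, {id: 2, time: 7}}

	// cmp.Compare returns -1/0/+1, exactly what SortFunc expects.
	slices.SortFunc(children, func(a, b nodeInfo) int {
		return cmp.Compare(a.time, b.time)
	})

	fmt.Println(children) // [{1 5} {2 7} {3 20}]
}
```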
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index 61a3849bf..e1f6cd8e7 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -21,6 +21,8 @@ type Forest interface {
// TreeApply applies a replicated operation from another node.
// If background is true, TreeApply will first check whether an operation exists.
TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
+ // TreeApplyBatch applies replicated operations from another node.
+ TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
@@ -35,7 +37,7 @@ type Forest interface {
TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
// TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error)
+ TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
@@ -60,10 +62,10 @@ type Forest interface {
type ForestStorage interface {
// DumpInfo returns information about the pilorama.
DumpInfo() Info
- Init() error
+ Init(context.Context) error
Open(context.Context, mode.Mode) error
- Close() error
- SetMode(m mode.Mode) error
+ Close(context.Context) error
+ SetMode(context.Context, mode.Mode) error
SetParentID(id string)
Forest
@@ -77,6 +79,38 @@ const (
AttributeVersion = "Version"
)
+// Cursor keeps state between function calls for traversing nodes.
+// It stores the attributes associated with a previous call, allowing subsequent operations
+// to resume traversal from this point rather than starting from the beginning.
+type Cursor struct {
+ // Last traversed filename.
+ filename string
+
+ // Last traversed node.
+ node Node
+}
+
+func NewCursor(filename string, node Node) *Cursor {
+ return &Cursor{
+ filename: filename,
+ node: node,
+ }
+}
+
+func (c *Cursor) GetFilename() string {
+ if c == nil {
+ return ""
+ }
+ return c.filename
+}
+
+func (c *Cursor) GetNode() Node {
+ if c == nil {
+ return Node(0)
+ }
+ return c.node
+}
+
// CIDDescriptor contains container ID and information about the node position
// in the list of container nodes.
type CIDDescriptor struct {
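Because both getters are nil-safe, a caller can start paging with a nil `*Cursor` and feed each returned cursor into the next call. A hedged usage fragment (`forest`, `ctx`, `cnr`, and `treeID` are assumed to be in scope; the empty-page stop condition follows the in-memory implementation):

```go
var (
	cursor *Cursor // nil means "start from the beginning"
	all    []MultiNodeInfo
)
for {
	page, next, err := forest.TreeSortedByFilename(ctx, cnr, treeID, MultiNode{RootID}, cursor, 100)
	if err != nil {
		return err
	}
	if len(page) == 0 { // an empty page signals the end of the traversal
		break
	}
	all = append(all, page...)
	cursor = next // resumes strictly after the last (filename, node) pair returned
}
```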
diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go
index 01d3da9f0..0c042aa56 100644
--- a/pkg/local_object_storage/pilorama/mode_test.go
+++ b/pkg/local_object_storage/pilorama/mode_test.go
@@ -19,13 +19,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close())
+ require.NoError(t, f.Close(context.Background()))
require.NoError(t, f.Open(context.Background(), mode.Degraded))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close())
+ require.NoError(t, f.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go
index 106ba6ae9..36d347f10 100644
--- a/pkg/local_object_storage/pilorama/multinode.go
+++ b/pkg/local_object_storage/pilorama/multinode.go
@@ -25,6 +25,10 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool {
return true
}
+func (r *MultiNodeInfo) LastChild() Node {
+ return r.Children[len(r.Children)-1]
+}
+
func (n NodeInfo) ToMultiNode() MultiNodeInfo {
return MultiNodeInfo{
Children: MultiNode{n.ID},
diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go
index 54c2b90a6..eecee1527 100644
--- a/pkg/local_object_storage/pilorama/split_test.go
+++ b/pkg/local_object_storage/pilorama/split_test.go
@@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) {
require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4"))
require.Equal(t, []byte{10}, testGetByPath(t, "value0"))
- testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) {
+ testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) {
res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize)
require.NoError(t, err)
return res, last
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index 364649b50..b4015ae8d 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 {
return r.size
}
-func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
+func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
s.m.RLock()
defer s.m.RUnlock()
@@ -34,9 +34,15 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
return ContainerSizeRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ContainerSizeRes{}, err
+ }
+ defer release()
+
size, err := s.metaBase.ContainerSize(prm.cnr)
if err != nil {
- return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
+ return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
}
return ContainerSizeRes{
@@ -69,9 +75,15 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont
return ContainerCountRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ContainerCountRes{}, err
+ }
+ defer release()
+
counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
if err != nil {
- return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err)
+ return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
}
return ContainerCountRes{
@@ -100,6 +112,12 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.metaBase.DeleteContainerSize(ctx, id)
}
@@ -122,5 +140,11 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.metaBase.DeleteContainerCount(ctx, id)
}
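Each shard method in this patch follows the same acquire/defer-release shape around its metabase call: a read slot for queries, a write slot for mutations. A sketch of the pattern with a hypothetical method name (`someReadOp` is not in the patch):

```go
func (s *Shard) someReadOp(ctx context.Context) error {
	release, err := s.opsLimiter.ReadRequest(ctx) // may block, or fail if ctx is canceled
	if err != nil {
		return err
	}
	defer release() // the slot is returned on every path, including early errors

	// ... query s.metaBase while holding the slot ...
	return nil
}
```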
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index de881654a..d489b8b0d 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -19,25 +20,25 @@ import (
"golang.org/x/sync/errgroup"
)
-func (s *Shard) handleMetabaseFailure(stage string, err error) error {
- s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
+func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error {
+ s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
- err = s.SetMode(mode.ReadOnly)
+ err = s.SetMode(ctx, mode.ReadOnly)
if err == nil {
return nil
}
- s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
+ s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
- err = s.SetMode(mode.DegradedReadOnly)
+ err = s.SetMode(ctx, mode.DegradedReadOnly)
if err != nil {
- return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly))
+ return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly)
}
return nil
}
@@ -71,10 +72,10 @@ func (s *Shard) Open(ctx context.Context) error {
for j := i + 1; j < len(components); j++ {
if err := components[j].Open(ctx, m); err != nil {
// Other components must be opened, fail.
- return fmt.Errorf("could not open %T: %w", components[j], err)
+ return fmt.Errorf("open %T: %w", components[j], err)
}
}
- err = s.handleMetabaseFailure("open", err)
+ err = s.handleMetabaseFailure(ctx, "open", err)
if err != nil {
return err
}
@@ -82,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error {
break
}
- return fmt.Errorf("could not open %T: %w", component, err)
+ return fmt.Errorf("open %T: %w", component, err)
}
}
return nil
@@ -90,8 +91,8 @@ func (s *Shard) Open(ctx context.Context) error {
type metabaseSynchronizer Shard
-func (x *metabaseSynchronizer) Init() error {
- ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
+func (x *metabaseSynchronizer) Init(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
@@ -100,26 +101,24 @@ func (x *metabaseSynchronizer) Init() error {
// Init initializes all Shard's components.
func (s *Shard) Init(ctx context.Context) error {
m := s.GetMode()
- if err := s.initializeComponents(m); err != nil {
+ if err := s.initializeComponents(ctx, m); err != nil {
return err
}
s.updateMetrics(ctx)
s.gc = &gc{
- gcCfg: &s.gcCfg,
- remover: s.removeGarbage,
- stopChannel: make(chan struct{}),
- eventChan: make(chan Event),
- mEventHandler: map[eventType]*eventHandlers{
- eventNewEpoch: {
- cancelFunc: func() {},
- handlers: []eventHandler{
- s.collectExpiredLocks,
- s.collectExpiredObjects,
- s.collectExpiredTombstones,
- s.collectExpiredMetrics,
- },
+ gcCfg: &s.gcCfg,
+ remover: s.removeGarbage,
+ stopChannel: make(chan struct{}),
+ newEpochChan: make(chan uint64),
+ newEpochHandlers: &newEpochHandlers{
+ cancelFunc: func() {},
+ handlers: []newEpochHandler{
+ s.collectExpiredLocks,
+ s.collectExpiredObjects,
+ s.collectExpiredTombstones,
+ s.collectExpiredMetrics,
},
},
}
@@ -137,9 +136,9 @@ func (s *Shard) Init(ctx context.Context) error {
return nil
}
-func (s *Shard) initializeComponents(m mode.Mode) error {
+func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
type initializer interface {
- Init() error
+ Init(context.Context) error
}
var components []initializer
@@ -169,13 +168,13 @@ func (s *Shard) initializeComponents(m mode.Mode) error {
}
for _, component := range components {
- if err := component.Init(); err != nil {
+ if err := component.Init(ctx); err != nil {
if component == s.metaBase {
if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
}
- err = s.handleMetabaseFailure("init", err)
+ err = s.handleMetabaseFailure(ctx, "init", err)
if err != nil {
return err
}
@@ -183,7 +182,7 @@ func (s *Shard) initializeComponents(m mode.Mode) error {
break
}
- return fmt.Errorf("could not initialize %T: %w", component, err)
+ return fmt.Errorf("initialize %T: %w", component, err)
}
}
return nil
@@ -204,19 +203,19 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
- return fmt.Errorf("could not reset metabase: %w", err)
+ return fmt.Errorf("reset metabase: %w", err)
}
withCount := true
totalObjects, err := s.blobStor.ObjectsCount(ctx)
if err != nil {
- s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
+ s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
withCount = false
}
eg, egCtx := errgroup.WithContext(ctx)
- if s.cfg.refillMetabaseWorkersCount > 0 {
- eg.SetLimit(s.cfg.refillMetabaseWorkersCount)
+ if s.refillMetabaseWorkersCount > 0 {
+ eg.SetLimit(s.refillMetabaseWorkersCount)
}
var completedCount uint64
@@ -253,12 +252,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err = errors.Join(egErr, itErr)
if err != nil {
- return fmt.Errorf("could not put objects to the meta: %w", err)
+ return fmt.Errorf("put objects to the meta: %w", err)
}
err = s.metaBase.SyncCounters()
if err != nil {
- return fmt.Errorf("could not sync object counters: %w", err)
+ return fmt.Errorf("sync object counters: %w", err)
}
success = true
@@ -269,12 +268,27 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- s.log.Warn(logs.ShardCouldNotUnmarshalObject,
+ s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
zap.Stringer("address", addr),
- zap.String("err", err.Error()))
+ zap.Error(err))
return nil
}
+ hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) >= 0
+
+ var isIndexedContainer bool
+ if hasIndexedAttribute {
+ info, err := s.containerInfo.Info(ctx, addr.Container())
+ if err != nil {
+ return err
+ }
+ if info.Removed {
+ s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
+ return nil
+ }
+ isIndexedContainer = info.Indexed
+ }
+
var err error
switch obj.Type() {
case objectSDK.TypeTombstone:
@@ -290,6 +304,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
var mPrm meta.PutPrm
mPrm.SetObject(obj)
mPrm.SetStorageID(descriptor)
+ mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer)
_, err = s.metaBase.Put(ctx, mPrm)
if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
@@ -301,7 +316,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("could not unmarshal lock content: %w", err)
+ return fmt.Errorf("unmarshal lock content: %w", err)
}
locked := make([]oid.ID, lock.NumberOfMembers())
@@ -311,7 +326,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
id, _ := obj.ID()
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
- return fmt.Errorf("could not lock objects: %w", err)
+ return fmt.Errorf("lock objects: %w", err)
}
return nil
}
@@ -320,7 +335,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("could not unmarshal tombstone content: %w", err)
+ return fmt.Errorf("unmarshal tombstone content: %w", err)
}
tombAddr := object.AddressOf(obj)
@@ -341,17 +356,18 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- return fmt.Errorf("could not inhume objects: %w", err)
+ return fmt.Errorf("inhume objects: %w", err)
}
return nil
}
// Close releases all Shard's components.
-func (s *Shard) Close() error {
+func (s *Shard) Close(ctx context.Context) error {
+ unlock := s.lockExclusive()
if s.rb != nil {
- s.rb.Stop(s.log)
+ s.rb.Stop(ctx, s.log)
}
- components := []interface{ Close() error }{}
+ var components []interface{ Close(context.Context) error }
if s.pilorama != nil {
components = append(components, s.pilorama)
@@ -367,15 +383,23 @@ func (s *Shard) Close() error {
var lastErr error
for _, component := range components {
- if err := component.Close(); err != nil {
+ if err := component.Close(ctx); err != nil {
lastErr = err
- s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
+ s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
+ if s.opsLimiter != nil {
+ s.opsLimiter.Close()
+ }
+
+ unlock()
+
+ // GC waits for the handlers and the remover to complete, and the handlers may try to
+ // take the shard's lock. To prevent a deadlock, GC is stopped outside of the exclusive lock.
// If Init/Open was unsuccessful gc can be nil.
if s.gc != nil {
- s.gc.stop()
+ s.gc.stop(ctx)
}
return lastErr
@@ -397,18 +421,18 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
unlock := s.lockExclusive()
defer unlock()
- s.rb.Stop(s.log)
+ s.rb.Stop(ctx, s.log)
if !s.info.Mode.NoMetabase() {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
}
- ok, err := s.metaBase.Reload(c.metaOpts...)
+ ok, err := s.metaBase.Reload(ctx, c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
- s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
- _ = s.setMode(mode.DegradedReadOnly)
+ s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
+ _ = s.setMode(ctx, mode.DegradedReadOnly)
}
return err
}
@@ -420,15 +444,19 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
// config after the node was updated.
err = s.refillMetabase(ctx)
} else {
- err = s.metaBase.Init()
+ err = s.metaBase.Init(ctx)
}
if err != nil {
- s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
- _ = s.setMode(mode.DegradedReadOnly)
+ s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
+ _ = s.setMode(ctx, mode.DegradedReadOnly)
return err
}
}
- return s.setMode(c.info.Mode)
+ if c.opsLimiter != nil {
+ s.opsLimiter.Close()
+ s.opsLimiter = c.opsLimiter
+ }
+ return s.setMode(ctx, c.info.Mode)
}
func (s *Shard) lockExclusive() func() {
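`initializeComponents` gathers components behind a local `initializer` interface, which is why every `Init` in this patch gains a `context.Context`: once the signatures match, the metabase, blobstor, pilorama, and write-cache can be initialized uniformly. A self-contained sketch of the pattern:

```go
package main

import (
	"context"
	"fmt"
)

// initializer matches the local interface used by initializeComponents.
type initializer interface {
	Init(context.Context) error
}

type metabase struct{}

func (metabase) Init(context.Context) error { return nil }

type blobstor struct{}

func (blobstor) Init(context.Context) error { return fmt.Errorf("volume missing") }

func main() {
	components := []initializer{metabase{}, blobstor{}}
	for _, c := range components {
		if err := c.Init(context.Background()); err != nil {
			fmt.Printf("initialize %T: %v\n", c, err) // mirrors the patch's error style
		}
	}
}
```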
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 44fee1636..6d2cd7137 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -86,7 +86,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadWrite, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
// Metabase can be opened in read-only => start in ReadOnly mode.
allowedMode.Store(int64(os.O_RDONLY))
@@ -95,9 +95,9 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.Error(t, sh.SetMode(mode.ReadWrite))
+ require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
// Metabase is corrupted => start in DegradedReadOnly mode.
allowedMode.Store(math.MaxInt64)
@@ -106,7 +106,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.DegradedReadOnly, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
}
func TestRefillMetabaseCorrupted(t *testing.T) {
@@ -126,7 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
@@ -146,7 +146,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
putPrm.SetObject(obj)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
addr := object.AddressOf(obj)
// This is copied from `fstree.treePath()` to avoid exporting function just for tests.
@@ -170,7 +170,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
getPrm.SetAddress(addr)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
}
func TestRefillMetabase(t *testing.T) {
@@ -190,7 +190,7 @@ func TestRefillMetabase(t *testing.T) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
@@ -216,7 +216,7 @@ func TestRefillMetabase(t *testing.T) {
locked := make([]oid.ID, 1, 2)
locked[0] = oidtest.ID()
cnrLocked := cidtest.ID()
- for i := uint64(0); i < objNum; i++ {
+ for range objNum {
obj := objecttest.Object()
obj.SetType(objectSDK.TypeRegular)
@@ -358,7 +358,7 @@ func TestRefillMetabase(t *testing.T) {
phyBefore := c.Phy
logicalBefore := c.Logic
- err = sh.Close()
+ err = sh.Close(context.Background())
require.NoError(t, err)
sh = New(
@@ -379,7 +379,7 @@ func TestRefillMetabase(t *testing.T) {
// initialize Blobstor
require.NoError(t, sh.Init(context.Background()))
- defer sh.Close()
+ defer sh.Close(context.Background())
checkAllObjs(false)
checkObj(object.AddressOf(tombObj), nil)
diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go
index b3bc6a30b..8dc1f0522 100644
--- a/pkg/local_object_storage/shard/count.go
+++ b/pkg/local_object_storage/shard/count.go
@@ -23,6 +23,12 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) {
return 0, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+
cc, err := s.metaBase.ObjectCounters()
if err != nil {
return 0, err
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index c898fdf41..0101817a8 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -55,6 +54,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del
return DeleteRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return DeleteRes{}, err
+ }
+ defer release()
+
result := DeleteRes{}
for _, addr := range prm.addr {
select {
@@ -95,7 +100,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr
}
_, err := s.writeCache.Head(ctx, addr)
if err == nil {
- s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
+ s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
return fmt.Errorf("object %s must be flushed from writecache", addr)
}
if client.IsErrObjectNotFound(err) {
@@ -110,10 +115,9 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
- s.log.Debug(logs.StorageIDRetrievalFailure,
+ s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
zap.Stringer("object", addr),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return err
}
storageID := res.StorageID()
@@ -130,10 +134,9 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
_, err = s.blobStor.Delete(ctx, delPrm)
if err != nil && !client.IsErrObjectNotFound(err) {
- s.log.Debug(logs.ObjectRemovalFailureBlobStor,
+ s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
zap.Stringer("object_address", addr),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return err
}
return nil
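The recurring logging change here swaps `zap.String("error", err.Error())` for `zap.Error(err)`: the latter is nil-safe and records the error under the conventional `error` key. A standalone sketch:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log := zap.NewExample()
	err := errors.New("object not found")

	// Removed style: stringly-typed, and err.Error() panics if err is nil.
	log.Warn("removal failed", zap.String("error", err.Error()))

	// Added style: nil-safe, standard "error" key, preserves the error value.
	log.Warn("removal failed", zap.Error(err))
}
```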
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index 9f205fa5d..c9ce93bc5 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -3,7 +3,6 @@ package shard
import (
"context"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
@@ -38,7 +37,7 @@ func TestShard_Delete_BigObject(t *testing.T) {
func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -58,19 +57,14 @@ func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
if hasWriteCache {
- sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false})
- require.Eventually(t, func() bool {
- _, err = sh.Delete(context.Background(), delPrm)
- return err == nil
- }, 30*time.Second, 10*time.Millisecond)
- } else {
- _, err = sh.Delete(context.Background(), delPrm)
- require.NoError(t, err)
+ require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}))
}
+ _, err = sh.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 784bf293a..2c11b6b01 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -18,7 +18,7 @@ type ExistsPrm struct {
// Exists option to set object checked for existence.
Address oid.Address
// Exists option to set parent object checked for existence.
- ParentAddress oid.Address
+ ECParentAddress oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -53,10 +53,6 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
))
defer span.End()
- var exists bool
- var locked bool
- var err error
-
s.m.RLock()
defer s.m.RUnlock()
@@ -64,7 +60,18 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
return ExistsRes{}, ErrShardDisabled
} else if s.info.EvacuationInProgress {
return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- } else if s.info.Mode.NoMetabase() {
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ExistsRes{}, err
+ }
+ defer release()
+
+ var exists bool
+ var locked bool
+
+ if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
p.Address = prm.Address
@@ -74,7 +81,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
} else {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(prm.Address)
- existsPrm.SetParent(prm.ParentAddress)
+ existsPrm.SetECParent(prm.ECParentAddress)
var res meta.ExistsRes
res, err = s.metaBase.Exists(ctx, existsPrm)
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index d605746e8..a262a52cb 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -6,11 +6,13 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -31,41 +33,14 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
-// Event represents class of external events.
-type Event interface {
- typ() eventType
-}
+type newEpochHandler func(context.Context, uint64)
-type eventType int
-
-const (
- _ eventType = iota
- eventNewEpoch
-)
-
-type newEpoch struct {
- epoch uint64
-}
-
-func (e newEpoch) typ() eventType {
- return eventNewEpoch
-}
-
-// EventNewEpoch returns new epoch event.
-func EventNewEpoch(e uint64) Event {
- return newEpoch{
- epoch: e,
- }
-}
-
-type eventHandler func(context.Context, Event)
-
-type eventHandlers struct {
+type newEpochHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
- handlers []eventHandler
+ handlers []newEpochHandler
}
type gcRunResult struct {
@@ -107,10 +82,10 @@ type gc struct {
remover func(context.Context) gcRunResult
- // eventChan is used only for listening for the new epoch event.
+ // newEpochChan is used only for listening for the new epoch event.
// It is ok to keep it open: we listen for context cancellation when writing to it.
- eventChan chan Event
- mEventHandler map[eventType]*eventHandlers
+ newEpochChan chan uint64
+ newEpochHandlers *newEpochHandlers
}
type gcCfg struct {
@@ -131,7 +106,7 @@ type gcCfg struct {
func defaultGCCfg() gcCfg {
return gcCfg{
removerInterval: 10 * time.Second,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
workerPoolInit: func(int) util.WorkerPool {
return nil
},
@@ -140,16 +115,8 @@ func defaultGCCfg() gcCfg {
}
func (gc *gc) init(ctx context.Context) {
- sz := 0
-
- for _, v := range gc.mEventHandler {
- sz += len(v.handlers)
- }
-
- if sz > 0 {
- gc.workerPool = gc.workerPoolInit(sz)
- }
-
+ gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
gc.wg.Add(2)
go gc.tickRemover(ctx)
go gc.listenEvents(ctx)
@@ -161,14 +128,14 @@ func (gc *gc) listenEvents(ctx context.Context) {
for {
select {
case <-gc.stopChannel:
- gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel)
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
return
case <-ctx.Done():
- gc.log.Warn(logs.ShardStopEventListenerByContext)
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
return
- case event, ok := <-gc.eventChan:
+ case event, ok := <-gc.newEpochChan:
if !ok {
- gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel)
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
return
}
@@ -177,43 +144,38 @@ func (gc *gc) listenEvents(ctx context.Context) {
}
}
-func (gc *gc) handleEvent(ctx context.Context, event Event) {
- v, ok := gc.mEventHandler[event.typ()]
- if !ok {
- return
- }
-
- v.cancelFunc()
- v.prevGroup.Wait()
+func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
+ gc.newEpochHandlers.cancelFunc()
+ gc.newEpochHandlers.prevGroup.Wait()
var runCtx context.Context
- runCtx, v.cancelFunc = context.WithCancel(ctx)
+ runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
- v.prevGroup.Add(len(v.handlers))
+ gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
- for i := range v.handlers {
+ for i := range gc.newEpochHandlers.handlers {
select {
case <-ctx.Done():
return
default:
}
- h := v.handlers[i]
+ h := gc.newEpochHandlers.handlers[i]
err := gc.workerPool.Submit(func() {
- defer v.prevGroup.Done()
- h(runCtx, event)
+ defer gc.newEpochHandlers.prevGroup.Done()
+ h(runCtx, epoch)
})
if err != nil {
- gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
- zap.String("error", err.Error()),
+ gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
+ zap.Error(err),
)
- v.prevGroup.Done()
+ gc.newEpochHandlers.prevGroup.Done()
}
}
}
-func (gc *gc) releaseResources() {
+func (gc *gc) releaseResources(ctx context.Context) {
if gc.workerPool != nil {
gc.workerPool.Release()
}
@@ -222,7 +184,7 @@ func (gc *gc) releaseResources() {
// because it is possible that we close it earlier than we stop writing to it.
// It is ok to keep it open.
- gc.log.Debug(logs.ShardGCIsStopped)
+ gc.log.Debug(ctx, logs.ShardGCIsStopped)
}
func (gc *gc) tickRemover(ctx context.Context) {
@@ -236,10 +198,10 @@ func (gc *gc) tickRemover(ctx context.Context) {
case <-ctx.Done():
// The context was canceled before we started to close the shards.
// It makes sense to stop collecting garbage on context cancellation too.
- gc.releaseResources()
+ gc.releaseResources(ctx)
return
case <-gc.stopChannel:
- gc.releaseResources()
+ gc.releaseResources(ctx)
return
case <-timer.C:
startedAt := time.Now()
@@ -258,13 +220,16 @@ func (gc *gc) tickRemover(ctx context.Context) {
}
}
-func (gc *gc) stop() {
+func (gc *gc) stop(ctx context.Context) {
gc.onceStop.Do(func() {
close(gc.stopChannel)
})
- gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
+ gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
+
+ gc.newEpochHandlers.cancelFunc()
+ gc.newEpochHandlers.prevGroup.Wait()
}
// iterates over metabase and deletes objects
@@ -286,8 +251,47 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
return
}
- s.log.Debug(logs.ShardGCRemoveGarbageStarted)
- defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted)
+ s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
+ defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
+
+ buf, err := s.getGarbage(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
+ zap.Error(err),
+ )
+
+ return
+ } else if len(buf) == 0 {
+ result.success = true
+ return
+ }
+
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(buf...)
+
+ // delete accumulated objects
+ res, err := s.delete(ctx, deletePrm, true)
+
+ result.deleted = res.deleted
+ result.failedToDelete = uint64(len(buf)) - res.deleted
+ result.success = true
+
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
+ zap.Error(err),
+ )
+ result.success = false
+ }
+
+ return
+}
+
+func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
buf := make([]oid.Address, 0, s.rmBatchSize)
@@ -308,47 +312,20 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
return nil
})
- // iterate over metabase's objects with GC mark
- // (no more than s.rmBatchSize objects)
- err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
- if err != nil {
- s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
- zap.String("error", err.Error()),
- )
-
- return
- } else if len(buf) == 0 {
- result.success = true
- return
+ if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil {
+ return nil, err
}
- var deletePrm DeletePrm
- deletePrm.SetAddresses(buf...)
-
- // delete accumulated objects
- res, err := s.delete(ctx, deletePrm, true)
-
- result.deleted = res.deleted
- result.failedToDelete = uint64(len(buf)) - res.deleted
- result.success = true
-
- if err != nil {
- s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
- zap.String("error", err.Error()),
- )
- result.success = false
- }
-
- return
+ return buf, nil
}
func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
- workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
- batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
+ workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
+ batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
return
}
-func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
+func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
var err error
startedAt := time.Now()
@@ -356,8 +333,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
}()
- s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
- defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -366,7 +343,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
batch = append(batch, o.Address())
@@ -396,7 +373,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
}
}
@@ -414,24 +391,25 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
return
}
+ s.handleExpiredObjectsUnsafe(ctx, expired)
+}
+
+func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
expired, err := s.getExpiredWithLinked(ctx, expired)
if err != nil {
- s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
+ s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
return
}
- var inhumePrm meta.InhumePrm
-
- inhumePrm.SetAddresses(expired...)
- inhumePrm.SetGCMark()
-
- // inhume the collected objects
- res, err := s.metaBase.Inhume(ctx, inhumePrm)
+ res, err := s.inhumeGC(ctx, expired)
if err != nil {
- s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err))
return
}
@@ -449,6 +427,12 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
}
func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+
result := make([]oid.Address, 0, len(source))
parentToChildren, err := s.metaBase.GetChildren(ctx, source)
if err != nil {
@@ -462,7 +446,20 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address)
return result, nil
}
-func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
+func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) {
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return meta.InhumeRes{}, err
+ }
+ defer release()
+
+ var inhumePrm meta.InhumePrm
+ inhumePrm.SetAddresses(addrs...)
+ inhumePrm.SetGCMark()
+ return s.metaBase.Inhume(ctx, inhumePrm)
+}
+
+func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
var err error
startedAt := time.Now()
@@ -470,11 +467,10 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
}()
- epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
- log.Debug(logs.ShardStartedExpiredTombstonesHandling)
- defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
+ log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
+ defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -492,22 +488,29 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
})
for {
- log.Debug(logs.ShardIteratingTombstones)
+ log.Debug(ctx, logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
- s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
+ s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
}
- err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
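+ // Take a read slot per iteration and release it right after the call:
+ // a defer would hold the limiter slot for the whole tombstone loop.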
+ var release qos.ReleaseFunc
+ release, err = s.opsLimiter.ReadRequest(ctx)
if err != nil {
- log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ s.m.RUnlock()
+ return
+ }
+ err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
+ release()
+ if err != nil {
+ log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
-
return
}
@@ -524,7 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
}
}
- log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+ log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
if len(tssExp) > 0 {
s.expiredTombstonesCallback(ctx, tssExp)
}
@@ -535,7 +538,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
}
}
-func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
+func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
var err error
startedAt := time.Now()
@@ -543,8 +546,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
}()
- s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
- defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -554,14 +557,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
if o.Type() == objectSDK.TypeLock {
batch = append(batch, o.Address())
if len(batch) == batchSize {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
+ s.expiredLocksCallback(egCtx, epoch, expired)
return egCtx.Err()
})
batch = make([]oid.Address, 0, batchSize)
@@ -575,7 +578,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
if len(batch) > 0 {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
+ s.expiredLocksCallback(egCtx, epoch, expired)
return egCtx.Err()
})
}
@@ -584,7 +587,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
}
}
@@ -596,7 +599,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
return ErrDegradedMode
}
- err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
select {
case <-ctx.Done():
return meta.ErrInterruptIterator
@@ -612,12 +621,11 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
}
func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return nil, ErrDegradedMode
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
}
+ defer release()
return s.metaBase.FilterExpired(ctx, epoch, addresses)
}
@@ -627,28 +635,22 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
//
// Does not modify tss.
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
- if s.GetMode().NoMetabase() {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
return
}
- // Mark tombstones as garbage.
- var pInhume meta.InhumePrm
-
- tsAddrs := make([]oid.Address, 0, len(tss))
- for _, ts := range tss {
- tsAddrs = append(tsAddrs, ts.Tombstone())
- }
-
- pInhume.SetGCMark()
- pInhume.SetAddresses(tsAddrs...)
-
- // inhume tombstones
- res, err := s.metaBase.Inhume(ctx, pInhume)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
+ return
+ }
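+ // InhumeTombstones marks the tombstones as garbage and drops their
+ // graveyard records in a single metabase call, replacing the former
+ // Inhume + DropGraves pair.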
+ res, err := s.metaBase.InhumeTombstones(ctx, tss)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
return
}
@@ -663,26 +665,27 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size))
i++
}
-
- // drop just processed expired tombstones
- // from graveyard
- err = s.metaBase.DropGraves(ctx, tss)
- if err != nil {
- s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
- }
}
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- if s.GetMode().NoMetabase() {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return
+ }
+
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
unlocked, err := s.metaBase.FreeLockedBy(lockers)
+ release()
if err != nil {
- s.log.Warn(logs.ShardFailureToUnlockObjects,
- zap.String("error", err.Error()),
- )
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
@@ -690,13 +693,15 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
var pInhume meta.InhumePrm
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
-
- res, err := s.metaBase.Inhume(ctx, pInhume)
+ release, err = s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
+ return
+ }
+ res, err := s.metaBase.Inhume(ctx, pInhume)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
return
}
@@ -718,7 +723,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
if err != nil {
- s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+ s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
return
}
@@ -726,47 +731,57 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
return
}
- s.handleExpiredObjects(ctx, expiredUnlocked)
+ s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked)
}
// HandleDeletedLocks unlocks all objects which were locked by lockers.
-func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
- if s.GetMode().NoMetabase() {
+func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
return
}
- _, err := s.metaBase.FreeLockedBy(lockers)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardFailureToUnlockObjects,
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ return
+ }
+ _, err = s.metaBase.FreeLockedBy(lockers)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
}
-// NotificationChannel returns channel for shard events.
-func (s *Shard) NotificationChannel() chan<- Event {
- return s.gc.eventChan
+// NotificationChannel returns the channel for new epoch events.
+func (s *Shard) NotificationChannel() chan<- uint64 {
+ return s.gc.newEpochChan
}
-func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
+func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
defer span.End()
- epoch := e.(newEpoch).epoch
-
- s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
s.collectExpiredContainerSizeMetrics(ctx, epoch)
s.collectExpiredContainerCountMetrics(ctx, epoch)
}
func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
- ids, err := s.metaBase.ZeroSizeContainers(ctx)
+ release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ ids, err := s.metaBase.ZeroSizeContainers(ctx)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
@@ -776,9 +791,15 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui
}
func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
- ids, err := s.metaBase.ZeroCountContainers(ctx)
+ release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
- s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ ids, err := s.metaBase.ZeroCountContainers(ctx)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 3993593ad..54d2f1510 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -37,7 +37,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -61,8 +62,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
meta.WithEpochState(epochState{}),
),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
@@ -73,13 +74,13 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
return pool
}),
WithGCRemoverSleepInterval(1 * time.Second),
+ WithDisabledGC(),
}
sh = New(opts...)
- sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 90958cd35..f512a488a 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -5,13 +5,13 @@ import (
"errors"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -34,7 +34,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
require.NoError(t, err)
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
+ sh.gc.handleEvent(context.Background(), epoch.Value)
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
@@ -131,7 +131,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
@@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
+ sh.gc.handleEvent(context.Background(), epoch.Value)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")
@@ -190,7 +190,7 @@ func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
obj := testutil.GenerateObjectWithSize(1024)
@@ -254,7 +254,7 @@ func TestGCDontDeleteObjectFromWritecache(t *testing.T) {
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
obj := testutil.GenerateObjectWithSize(1024)
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index d1c393613..28f8912be 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -10,7 +10,6 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -112,6 +111,12 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return c.Get(ctx, prm.addr)
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return GetRes{}, err
+ }
+ defer release()
+
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
@@ -144,7 +149,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
} else {
- s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
+ s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@@ -153,16 +158,14 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return res, false, err
}
if client.IsErrObjectNotFound(err) {
- s.log.Debug(logs.ShardObjectIsMissingInWritecache,
+ s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Bool("skip_meta", skipMeta))
} else {
- s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
+ s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Bool("skip_meta", skipMeta))
}
}
if skipMeta || mErr != nil {
@@ -175,7 +178,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
- return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
+ return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
}
storageID := mExRes.StorageID()
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 8a7c6972d..837991b73 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -5,11 +5,9 @@ import (
"context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -32,7 +30,7 @@ func TestShard_Get(t *testing.T) {
func testShardGet(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
var putPrm PutPrm
var getPrm GetPrm
@@ -49,7 +47,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -67,7 +65,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -95,13 +93,13 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(child))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.True(t, binaryEqual(child, res.Object()))
getPrm.SetAddress(object.AddressOf(parent))
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
var si *objectSDK.SplitInfoError
require.True(t, errors.As(err, &si))
@@ -115,19 +113,6 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
})
}
-func testGet(t *testing.T, sh *Shard, getPrm GetPrm, hasWriteCache bool) (GetRes, error) {
- res, err := sh.Get(context.Background(), getPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if client.IsErrObjectNotFound(err) {
- res, err = sh.Get(context.Background(), getPrm)
- }
- return !client.IsErrObjectNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
-
// binary equal is used when object contains empty lists in the structure and
// requre.Equal fails on comparing and []{} lists.
func binaryEqual(a, b *objectSDK.Object) bool {
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index ff57e3bf9..34b8290d6 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -81,6 +81,12 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
+ release, limitErr := s.opsLimiter.ReadRequest(ctx)
+ if limitErr != nil {
+ return HeadRes{}, limitErr
+ }
+ defer release()
+
var res meta.GetRes
res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index 1f4631993..deb3019df 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -4,11 +4,9 @@ import (
"context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
@@ -30,7 +28,7 @@ func TestShard_Head(t *testing.T) {
func testShardHead(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
var putPrm PutPrm
var headPrm HeadPrm
@@ -46,7 +44,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
headPrm.SetAddress(object.AddressOf(obj))
- res, err := testHead(t, sh, headPrm, hasWriteCache)
+ res, err := sh.Head(context.Background(), headPrm)
require.NoError(t, err)
require.Equal(t, obj.CutPayload(), res.Object())
})
@@ -74,7 +72,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
var siErr *objectSDK.SplitInfoError
- _, err = testHead(t, sh, headPrm, hasWriteCache)
+ _, err = sh.Head(context.Background(), headPrm)
require.True(t, errors.As(err, &siErr))
headPrm.SetAddress(object.AddressOf(parent))
@@ -85,16 +83,3 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
require.Equal(t, parent.CutPayload(), head.Object())
})
}
-
-func testHead(t *testing.T, sh *Shard, headPrm HeadPrm, hasWriteCache bool) (HeadRes, error) {
- res, err := sh.Head(context.Background(), headPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if client.IsErrObjectNotFound(err) {
- res, err = sh.Head(context.Background(), headPrm)
- }
- return !client.IsErrObjectNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 2fe68d270..7391adef2 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -1,11 +1,11 @@
package shard
import (
+ "context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/mr-tron/base58"
"go.uber.org/zap"
)
@@ -31,12 +31,12 @@ func (s *Shard) ID() *ID {
}
// UpdateID reads shard ID saved in the metabase and updates it if it is missing.
-func (s *Shard) UpdateID() (err error) {
+func (s *Shard) UpdateID(ctx context.Context) (err error) {
var idFromMetabase []byte
modeDegraded := s.GetMode().NoMetabase()
if !modeDegraded {
- if idFromMetabase, err = s.metaBase.GetShardID(mode.ReadOnly); err != nil {
- err = fmt.Errorf("failed to read shard id from metabase: %w", err)
+ if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
+ err = fmt.Errorf("read shard id from metabase: %w", err)
}
}
@@ -45,14 +45,12 @@ func (s *Shard) UpdateID() (err error) {
}
shardID := s.info.ID.String()
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.SetShardID(shardID)
- }
+ s.metricsWriter.SetShardID(shardID)
if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
s.writeCache.GetMetrics().SetShardID(shardID)
}
- s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))}
+ s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
s.metaBase.SetLogger(s.log)
s.blobStor.SetLogger(s.log)
if s.hasWriteCache() {
@@ -63,10 +61,11 @@ func (s *Shard) UpdateID() (err error) {
if s.pilorama != nil {
s.pilorama.SetParentID(s.info.ID.String())
}
+ s.opsLimiter.SetParentID(s.info.ID.String())
if len(idFromMetabase) == 0 && !modeDegraded {
- if setErr := s.metaBase.SetShardID(*s.info.ID, s.GetMode()); setErr != nil {
- err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr))
+ if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
+ err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
}
}
return
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 746177c3a..c0fd65f4b 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -82,6 +81,12 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return InhumeRes{}, err
+ }
+ defer release()
+
if s.hasWriteCache() {
for i := range prm.target {
_ = s.writeCache.Delete(ctx, prm.target[i])
@@ -109,9 +114,8 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrLockObjectRemoval
}
- s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+ s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+ zap.Error(err),
)
s.m.RUnlock()
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 82754568f..1421f0e18 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -27,7 +27,7 @@ func TestShard_Inhume(t *testing.T) {
func testShardInhume(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -48,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
_, err = sh.Inhume(context.Background(), inhPrm)
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 08ea81a0c..af87981ca 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -34,6 +33,30 @@ func (r ListContainersRes) Containers() []cid.ID {
return r.containers
}
+// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler function executed upon containers in db.
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
+ // Handler function executed upon objects in db.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
+// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
+}
+
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -83,9 +106,15 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
return SelectRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return SelectRes{}, err
+ }
+ defer release()
+
lst, err := s.metaBase.Containers(ctx)
if err != nil {
- return res, fmt.Errorf("can't list stored containers: %w", err)
+ return res, fmt.Errorf("list stored containers: %w", err)
}
filters := objectSDK.NewSearchFilters()
@@ -98,10 +127,9 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
if err != nil {
- s.log.Debug(logs.ShardCantSelectAllObjects,
+ s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
- zap.String("error", err.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
continue
}
@@ -123,9 +151,15 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
return ListContainersRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ListContainersRes{}, err
+ }
+ defer release()
+
containers, err := s.metaBase.Containers(ctx)
if err != nil {
- return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
+ return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
}
return ListContainersRes{
@@ -151,12 +185,18 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
return ListWithCursorRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ListWithCursorRes{}, err
+ }
+ defer release()
+
var metaPrm meta.ListPrm
metaPrm.SetCount(prm.count)
metaPrm.SetCursor(prm.cursor)
res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
if err != nil {
- return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
+ return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
}
return ListWithCursorRes{
@@ -164,3 +204,96 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
cursor: res.Cursor(),
}, nil
}
+
+// IterateOverContainers iterates over all physical containers present in the shard.
+func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = prm.Handler
+ err = s.metaBase.IterateOverContainers(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("iterate over containers: %w", err)
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer iterates over physical objects of the given type present in the shard for the provided container.
+func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ var metaPrm meta.IterateOverObjectsInContainerPrm
+ metaPrm.ContainerID = prm.ContainerID
+ metaPrm.ObjectType = prm.ObjectType
+ metaPrm.Handler = prm.Handler
+ err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("iterate over objects: %w", err)
+ }
+
+ return nil
+}
+
+// CountAliveObjectsInContainer counts the container's objects that are neither in the graveyard nor marked as garbage.
+func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInContainer")
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+
+ var metaPrm meta.CountAliveObjectsInContainerPrm
+ metaPrm.ObjectType = prm.ObjectType
+ metaPrm.ContainerID = prm.ContainerID
+ count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
+ if err != nil {
+ return 0, fmt.Errorf("count alive objects in bucket: %w", err)
+ }
+
+ return count, nil
+}
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 3414dc76a..139b2e316 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -18,14 +18,14 @@ func TestShard_List(t *testing.T) {
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
testShardList(t, sh)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
shWC := newShard(t, true)
- defer func() { require.NoError(t, shWC.Close()) }()
+ defer func() { require.NoError(t, shWC.Close(context.Background())) }()
testShardList(t, shWC)
})
}
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index 4a8d89d63..9c392fdac 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -38,7 +38,13 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []
return ErrDegradedMode
}
- err := s.metaBase.Lock(ctx, idCnr, locker, locked)
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ err = s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -61,6 +67,12 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return false, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer release()
+
var prm meta.IsLockedPrm
prm.SetAddress(addr)
@@ -72,10 +84,10 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return res.Locked(), nil
}
-// GetLocked return lock id's of the provided object. Not found object is
+// GetLocks returns lock IDs of the provided object. Not found object is
// considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
-func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocked",
+func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", addr.EncodeToString()),
@@ -86,5 +98,12 @@ func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, erro
if m.NoMetabase() {
return nil, ErrDegradedMode
}
- return s.metaBase.GetLocked(ctx, addr)
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+
+ return s.metaBase.GetLocks(ctx, addr)
}
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 9ce95feb1..3878a65cd 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -28,9 +28,10 @@ func TestShard_Lock(t *testing.T) {
var sh *Shard
rootPath := t.TempDir()
+ l := logger.NewLoggerWrapper(zap.NewNop())
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
- WithLogger(&logger.Logger{Logger: zap.NewNop()}),
+ WithLogger(l),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -53,8 +54,8 @@ func TestShard_Lock(t *testing.T) {
meta.WithPath(filepath.Join(rootPath, "meta")),
meta.WithEpochState(epochState{}),
),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
}
@@ -62,7 +63,7 @@ func TestShard_Lock(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
@@ -148,7 +149,7 @@ func TestShard_Lock(t *testing.T) {
func TestShard_IsLocked(t *testing.T) {
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
new file mode 100644
index 000000000..087ba42ef
--- /dev/null
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -0,0 +1,60 @@
+package shard
+
+import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+
+// MetricsWriter is an interface that must store shard's metrics.
+type MetricsWriter interface {
+ // SetObjectCounter must set object counter taking into account object type.
+ SetObjectCounter(objectType string, v uint64)
+ // AddToObjectCounter must update object counter taking into account object
+ // type.
+ // Negative parameter must decrease the counter.
+ AddToObjectCounter(objectType string, delta int)
+ // AddToContainerSize must add a value to the container size.
+ // Value can be negative.
+ AddToContainerSize(cnr string, value int64)
+ // AddToPayloadSize must add a value to the payload size.
+ // Value can be negative.
+ AddToPayloadSize(value int64)
+ // IncObjectCounter must increment shard's object counter taking into account
+ // object type.
+ IncObjectCounter(objectType string)
+ // SetShardID must set (update) the shard identifier that will be used in
+ // metrics.
+ SetShardID(id string)
+ // SetMode set mode of shard.
+ SetMode(mode mode.Mode)
+ // SetContainerObjectsCount sets container object count.
+ SetContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncContainerObjectsCount increments container object count.
+ IncContainerObjectsCount(cnrID string, objectType string)
+ // SubContainerObjectsCount subtracts container object count.
+ SubContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncRefillObjectsCount increments refill objects count.
+ IncRefillObjectsCount(path string, size int, success bool)
+ // SetRefillPercent sets refill percent.
+ SetRefillPercent(path string, percent uint32)
+ // SetRefillStatus sets refill status.
+ SetRefillStatus(path string, status string)
+ // SetEvacuationInProgress sets evacuation status
+ SetEvacuationInProgress(value bool)
+}
+
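+// noopMetrics is a MetricsWriter that discards every value, letting shard
+// code call metrics unconditionally when no collector is configured.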
+type noopMetrics struct{}
+
+var _ MetricsWriter = noopMetrics{}
+
+func (noopMetrics) SetObjectCounter(string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, int) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) AddToPayloadSize(int64) {}
+func (noopMetrics) IncObjectCounter(string) {}
+func (noopMetrics) SetShardID(string) {}
+func (noopMetrics) SetMode(mode.Mode) {}
+func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncContainerObjectsCount(string, string) {}
+func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string) {}
+func (noopMetrics) SetEvacuationInProgress(bool) {}
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 01a85da97..5230dcad0 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -17,6 +17,7 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
@@ -34,7 +35,7 @@ type metricsStore struct {
refillStatus string
}
-func NewMetricStore() *metricsStore {
+func newMetricStore() *metricsStore {
return &metricsStore{
objCounters: map[string]uint64{
"phy": 0,
@@ -200,11 +201,11 @@ func TestCounters(t *testing.T) {
dir := t.TempDir()
sh, mm := shardWithMetrics(t, dir)
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
- sh.SetMode(mode.ReadOnly)
+ sh.SetMode(context.Background(), mode.ReadOnly)
require.Equal(t, mode.ReadOnly, mm.mode)
- sh.SetMode(mode.ReadWrite)
+ sh.SetMode(context.Background(), mode.ReadWrite)
require.Equal(t, mode.ReadWrite, mm.mode)
const objNumber = 10
@@ -308,17 +309,19 @@ func TestCounters(t *testing.T) {
t.Run("inhume_TS", func(t *testing.T) {
var prm InhumePrm
- ts := objectcore.AddressOf(testutil.GenerateObject())
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
custom := mm.getObjectCounter(user)
inhumedNumber := int(phy / 4)
- prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
-
- _, err := sh.Inhume(context.Background(), prm)
- require.NoError(t, err)
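+ // Inhume each object with a tombstone from the same container:
+ // a tombstone is expected to share a container with the objects it buries.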
+ for _, o := range addrFromObjs(oo[:inhumedNumber]) {
+ ts := oidtest.Address()
+ ts.SetContainer(o.Container())
+ prm.SetTarget(ts, o)
+ _, err := sh.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
for i := range inhumedNumber {
cid, ok := oo[i].ContainerID()
@@ -401,7 +404,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 1bab57448..901528976 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,6 +1,8 @@
package shard
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -18,19 +20,21 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode")
//
// Returns any error encountered that did not allow
// setting shard mode.
-func (s *Shard) SetMode(m mode.Mode) error {
+func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error {
unlock := s.lockExclusive()
defer unlock()
- return s.setMode(m)
+ return s.setMode(ctx, m)
}
-func (s *Shard) setMode(m mode.Mode) error {
- s.log.Info(logs.ShardSettingShardMode,
+func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
+ s.log.Info(ctx, logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
- components := []interface{ SetMode(mode.Mode) error }{
+ components := []interface {
+ SetMode(context.Context, mode.Mode) error
+ }{
s.metaBase, s.blobStor,
}
@@ -58,18 +62,16 @@ func (s *Shard) setMode(m mode.Mode) error {
if !m.Disabled() {
for i := range components {
- if err := components[i].SetMode(m); err != nil {
+ if err := components[i].SetMode(ctx, m); err != nil {
return err
}
}
}
s.info.Mode = m
- if s.metricsWriter != nil {
- s.metricsWriter.SetMode(s.info.Mode)
- }
+ s.metricsWriter.SetMode(s.info.Mode)
- s.log.Info(logs.ShardShardModeSetSuccessfully,
+ s.log.Info(ctx, logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index d7a9e7012..f8cb00a31 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -17,7 +17,8 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
+ obj *objectSDK.Object
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
@@ -28,6 +29,10 @@ func (p *PutPrm) SetObject(obj *objectSDK.Object) {
p.obj = obj
}
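+// SetIndexAttributes toggles indexing of object attributes in the metabase on Put.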
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
// Put saves the object in shard.
//
// Returns any error encountered that
@@ -62,6 +67,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var res common.PutRes
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return PutRes{}, err
+ }
+ defer release()
+
// exist check are not performed there, these checks should be executed
// ahead of `Put` by storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
@@ -70,13 +81,13 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
}
if err != nil || !tryCache {
if err != nil {
- s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
- zap.String("err", err.Error()))
+ s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+ zap.Error(err))
}
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
- return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
+ return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
}
}
@@ -84,11 +95,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
+ pPrm.SetIndexAttributes(prm.indexAttributes)
res, err := s.metaBase.Put(ctx, pPrm)
if err != nil {
// may we need to handle this case in a special way
// since the object has been successfully written to BlobStor
- return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
+ return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
}
if res.Inserted {
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 701268820..443689104 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -131,6 +131,12 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
return obj, nil
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return RngRes{}, err
+ }
+ defer release()
+
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index cc73db316..06fe9f511 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -79,7 +79,8 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -94,7 +95,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
}),
},
})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 0d83caa0c..20f1f2b6f 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -6,10 +6,13 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -18,37 +21,9 @@ import (
var ErrRebuildInProgress = errors.New("shard rebuild in progress")
-type RebuildWorkerLimiter interface {
- AcquireWorkSlot(ctx context.Context) error
- ReleaseWorkSlot()
-}
-
-type rebuildLimiter struct {
- semaphore chan struct{}
-}
-
-func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter {
- return &rebuildLimiter{
- semaphore: make(chan struct{}, workersCount),
- }
-}
-
-func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
- select {
- case l.semaphore <- struct{}{}:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func (l *rebuildLimiter) ReleaseWorkSlot() {
- <-l.semaphore
-}
-
type rebuildTask struct {
- limiter RebuildWorkerLimiter
- fillPercent int
+ concurrencyLimiter common.RebuildLimiter
+ fillPercent int
}
type rebuilder struct {
@@ -88,36 +63,37 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D
if !ok {
continue
}
- runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter)
+ runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter)
}
}
}()
}
func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
- fillPercent int, limiter RebuildWorkerLimiter,
+ fillPercent int, concLimiter common.RebuildLimiter,
) {
select {
case <-ctx.Done():
return
default:
}
- log.Info(logs.BlobstoreRebuildStarted)
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
- log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
+ log.Info(ctx, logs.BlobstoreRebuildStarted)
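+ // Tag rebuild I/O as background so the QoS scheduler can deprioritize it
+ // relative to client traffic.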
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil {
+ log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
} else {
- log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
+ log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
}
}
-func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int,
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int,
) error {
select {
case <-ctx.Done():
return ctx.Err()
case r.tasks <- rebuildTask{
- limiter: limiter,
- fillPercent: fillPercent,
+ concurrencyLimiter: limiter,
+ fillPercent: fillPercent,
}:
return nil
default:
@@ -125,7 +101,7 @@ func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLi
}
}
-func (r *rebuilder) Stop(log *logger.Logger) {
+func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -138,7 +114,7 @@ func (r *rebuilder) Stop(log *logger.Logger) {
r.wg.Wait()
r.cancel = nil
r.done = nil
- log.Info(logs.BlobstoreRebuildStopped)
+ log.Info(ctx, logs.BlobstoreRebuildStopped)
}
var errMBIsNotAvailable = errors.New("metabase is not available")
@@ -166,7 +142,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres
}
type RebuildPrm struct {
- ConcurrencyLimiter RebuildWorkerLimiter
+ ConcurrencyLimiter common.ConcurrencyLimiter
TargetFillPercent uint32
}
@@ -188,5 +164,30 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
return ErrDegradedMode
}
- return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent))
+ limiter := &rebuildLimiter{
+ concurrencyLimiter: p.ConcurrencyLimiter,
+ rateLimiter: s.opsLimiter,
+ }
+ return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent))
+}
+
+var _ common.RebuildLimiter = (*rebuildLimiter)(nil)
+
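+// rebuildLimiter adapts the caller-provided concurrency limiter and the
+// shard's ops limiter to the common.RebuildLimiter interface, so rebuild
+// is bounded both in parallelism and in read/write rate.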
+type rebuildLimiter struct {
+ concurrencyLimiter common.ConcurrencyLimiter
+ rateLimiter qos.Limiter
+}
+
+func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
+ return r.concurrencyLimiter.AcquireWorkSlot(ctx)
+}
+
+func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) {
+ release, err := r.rateLimiter.ReadRequest(ctx)
+ return common.ReleaseFunc(release), err
+}
+
+func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) {
+ release, err := r.rateLimiter.WriteRequest(ctx)
+ return common.ReleaseFunc(release), err
}
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
index 0025bb45a..d90343265 100644
--- a/pkg/local_object_storage/shard/refill_test.go
+++ b/pkg/local_object_storage/shard/refill_test.go
@@ -34,7 +34,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)},
})
- defer func() { require.NoError(b, sh.Close()) }()
+ defer func() { require.NoError(b, sh.Close(context.Background())) }()
var putPrm PutPrm
@@ -61,7 +61,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, err)
}
- require.NoError(b, sh.Close())
+ require.NoError(b, sh.Close(context.Background()))
require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path))
require.NoError(b, sh.Open(context.Background()))
@@ -72,5 +72,5 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, sh.Init(context.Background()))
- require.NoError(b, sh.Close())
+ require.NoError(b, sh.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index 7dacbfa6c..e563f390b 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -51,7 +51,7 @@ func TestShardReload(t *testing.T) {
WithMetaBaseOptions(metaOpts...),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama"))),
- WithMetricsWriter(NewMetricStore()),
+ WithMetricsWriter(newMetricStore()),
}
sh := New(opts...)
@@ -59,7 +59,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, sh.Init(context.Background()))
defer func() {
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
}()
objects := make([]objAddr, 5)
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index 1615f5fbe..fbc751e26 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -15,8 +15,9 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ isIndexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -25,8 +26,9 @@ type SelectRes struct {
}
// SetContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) SetContainerID(cnr cid.ID) {
+func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) {
p.cnr = cnr
+ p.isIndexedContainer = isIndexedContainer
}
// SetFilters is a Select option to set the object filters.
@@ -58,13 +60,20 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
return SelectRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return SelectRes{}, err
+ }
+ defer release()
+
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
+ selectPrm.SetUseAttributeIndex(prm.isIndexedContainer)
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
- return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
+ return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
}
return SelectRes{
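
Every shard data-path operation now takes a quota from `opsLimiter` before touching the metabase. The guard is only sound if the limiter error is propagated to the caller and the release function runs exactly once on each return path. A minimal sketch of the pattern, with a stub limiter that always rejects:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type ReleaseFunc func()

// limiter is a stub for the qos.Limiter contract assumed by the diff.
type limiter interface {
	ReadRequest(ctx context.Context) (ReleaseFunc, error)
}

type rejectAll struct{}

func (rejectAll) ReadRequest(context.Context) (ReleaseFunc, error) {
	return nil, errors.New("read quota exhausted")
}

func selectObjects(ctx context.Context, l limiter) ([]string, error) {
	release, err := l.ReadRequest(ctx)
	if err != nil {
		// Propagate the error; returning nil here would silently hand the
		// caller an empty result set.
		return nil, err
	}
	defer release() // runs exactly once, on every path below

	return []string{"obj1", "obj2"}, nil // stand-in for the metabase query
}

func main() {
	_, err := selectObjects(context.Background(), rejectAll{})
	fmt.Println(err) // read quota exhausted
}
```
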
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 7496fc352..d89b56266 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -7,6 +7,8 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -57,50 +59,6 @@ type DeletedLockCallback func(context.Context, []oid.Address)
// EmptyContainersCallback is a callback handling a list of zero-size and zero-count containers.
type EmptyContainersCallback func(context.Context, []cid.ID)
-// MetricsWriter is an interface that must store shard's metrics.
-type MetricsWriter interface {
- // SetObjectCounter must set object counter taking into account object type.
- SetObjectCounter(objectType string, v uint64)
- // AddToObjectCounter must update object counter taking into account object
- // type.
- // Negative parameter must decrease the counter.
- AddToObjectCounter(objectType string, delta int)
- // AddToContainerSize must add a value to the container size.
- // Value can be negative.
- AddToContainerSize(cnr string, value int64)
- // AddToPayloadSize must add a value to the payload size.
- // Value can be negative.
- AddToPayloadSize(value int64)
- // IncObjectCounter must increment shard's object counter taking into account
- // object type.
- IncObjectCounter(objectType string)
- // SetShardID must set (update) the shard identifier that will be used in
- // metrics.
- SetShardID(id string)
- // SetReadonly must set shard mode.
- SetMode(mode mode.Mode)
- // IncErrorCounter increment error counter.
- IncErrorCounter()
- // ClearErrorCounter clear error counter.
- ClearErrorCounter()
- // DeleteShardMetrics deletes shard metrics from registry.
- DeleteShardMetrics()
- // SetContainerObjectsCount sets container object count.
- SetContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncContainerObjectsCount increments container object count.
- IncContainerObjectsCount(cnrID string, objectType string)
- // SubContainerObjectsCount subtracts container object count.
- SubContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncRefillObjectsCount increments refill objects count.
- IncRefillObjectsCount(path string, size int, success bool)
- // SetRefillPercent sets refill percent.
- SetRefillPercent(path string, percent uint32)
- // SetRefillStatus sets refill status.
- SetRefillStatus(path string, status string)
- // SetEvacuationInProgress sets evacuation status
- SetEvacuationInProgress(value bool)
-}
-
type cfg struct {
m sync.RWMutex
@@ -138,17 +96,23 @@ type cfg struct {
metricsWriter MetricsWriter
- reportErrorFunc func(selfID string, message string, err error)
+ reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
+
+ containerInfo container.InfoProvider
+
+ opsLimiter qos.Limiter
}
func defaultCfg() *cfg {
return &cfg{
rmBatchSize: 100,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
gcCfg: defaultGCCfg(),
- reportErrorFunc: func(string, string, error) {},
+ reportErrorFunc: func(context.Context, string, string, error) {},
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
+ metricsWriter: noopMetrics{},
+ opsLimiter: qos.NewNoopLimiter(),
}
}
@@ -170,8 +134,8 @@ func New(opts ...Option) *Shard {
tsSource: c.tsSource,
}
- reportFunc := func(msg string, err error) {
- s.reportErrorFunc(s.ID().String(), msg, err)
+ reportFunc := func(ctx context.Context, msg string, err error) {
+ s.reportErrorFunc(ctx, s.ID().String(), msg, err)
}
s.blobStor.SetReportErrorFunc(reportFunc)
@@ -241,7 +205,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option {
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
- c.gcCfg.log = l
+ c.gcCfg.log = l.WithTag(logger.TagGC)
}
}
@@ -254,7 +218,7 @@ func WithWriteCache(use bool) Option {
// hasWriteCache returns bool if write cache exists on shards.
func (s *Shard) hasWriteCache() bool {
- return s.cfg.useWriteCache
+ return s.useWriteCache
}
// NeedRefillMetabase returns true if metabase is needed to be refilled.
@@ -357,7 +321,7 @@ func WithGCMetrics(v GCMectrics) Option {
// WithReportErrorFunc returns option to specify callback for handling storage-related errors
// in the background workers.
-func WithReportErrorFunc(f func(selfID string, message string, err error)) Option {
+func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
return func(c *cfg) {
c.reportErrorFunc = f
}
@@ -401,16 +365,29 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
-func (s *Shard) fillInfo() {
- s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
- s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
- s.cfg.info.Mode = s.GetMode()
+// WithContainerInfoProvider returns option to set container info provider.
+func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
+ return func(c *cfg) {
+ c.containerInfo = containerInfo
+ }
+}
- if s.cfg.useWriteCache {
- s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
+func WithLimiter(l qos.Limiter) Option {
+ return func(c *cfg) {
+ c.opsLimiter = l
+ }
+}
+
+func (s *Shard) fillInfo() {
+ s.info.MetaBaseInfo = s.metaBase.DumpInfo()
+ s.info.BlobStorInfo = s.blobStor.DumpInfo()
+ s.info.Mode = s.GetMode()
+
+ if s.useWriteCache {
+ s.info.WriteCacheInfo = s.writeCache.DumpInfo()
}
if s.pilorama != nil {
- s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
+ s.info.PiloramaInfo = s.pilorama.DumpInfo()
}
}
@@ -428,13 +405,13 @@ const (
)
func (s *Shard) updateMetrics(ctx context.Context) {
- if s.cfg.metricsWriter == nil || s.GetMode().NoMetabase() {
+ if s.GetMode().NoMetabase() {
return
}
cc, err := s.metaBase.ObjectCounters()
if err != nil {
- s.log.Warn(logs.ShardMetaObjectCounterRead,
+ s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
zap.Error(err),
)
@@ -447,7 +424,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
cnrList, err := s.metaBase.Containers(ctx)
if err != nil {
- s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
+ s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
return
}
@@ -456,7 +433,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
for i := range cnrList {
size, err := s.metaBase.ContainerSize(cnrList[i])
if err != nil {
- s.log.Warn(logs.ShardMetaCantReadContainerSize,
+ s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
zap.String("cid", cnrList[i].EncodeToString()),
zap.Error(err))
continue
@@ -469,7 +446,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
contCount, err := s.metaBase.ContainerCounters(ctx)
if err != nil {
- s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err))
+ s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
return
}
for contID, count := range contCount.Counts {
@@ -477,95 +454,69 @@ func (s *Shard) updateMetrics(ctx context.Context) {
s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
}
- s.cfg.metricsWriter.SetMode(s.info.Mode)
+ s.metricsWriter.SetMode(s.info.Mode)
}
// incObjectCounter increments both the physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.IncObjectCounter(physical)
- s.cfg.metricsWriter.IncObjectCounter(logical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
- if isUser {
- s.cfg.metricsWriter.IncObjectCounter(user)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
- }
+ s.metricsWriter.IncObjectCounter(physical)
+ s.metricsWriter.IncObjectCounter(logical)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+ if isUser {
+ s.metricsWriter.IncObjectCounter(user)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
- s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
+ if v > 0 {
+ s.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) setObjectCounterBy(typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
- s.cfg.metricsWriter.SetObjectCounter(typ, v)
+ if v > 0 {
+ s.metricsWriter.SetObjectCounter(typ, v)
}
}
func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
- if s.cfg.metricsWriter == nil {
- return
- }
-
for cnrID, count := range byCnr {
if count.Phy > 0 {
- s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
}
if count.Logic > 0 {
- s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
}
if count.User > 0 {
- s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
}
}
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
- if s.cfg.metricsWriter != nil && size != 0 {
- s.cfg.metricsWriter.AddToContainerSize(cnr, size)
+ if size != 0 {
+ s.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
- if s.cfg.metricsWriter != nil && size != 0 {
- s.cfg.metricsWriter.AddToPayloadSize(size)
+ if size != 0 {
+ s.metricsWriter.AddToPayloadSize(size)
}
}
func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
+ if v > 0 {
s.metricsWriter.SetContainerObjectsCount(cnr, typ, v)
}
}
-func (s *Shard) IncErrorCounter() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.IncErrorCounter()
- }
-}
-
-func (s *Shard) ClearErrorCounter() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.ClearErrorCounter()
- }
-}
-
-func (s *Shard) DeleteShardMetrics() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.DeleteShardMetrics()
- }
-}
-
func (s *Shard) SetEvacuationInProgress(val bool) {
s.m.Lock()
defer s.m.Unlock()
s.info.EvacuationInProgress = val
- if s.metricsWriter != nil {
- s.metricsWriter.SetEvacuationInProgress(val)
- }
+ s.metricsWriter.SetEvacuationInProgress(val)
}
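
All the `metricsWriter != nil` checks can go because `defaultCfg` now installs a no-op implementation — the null-object pattern; `qos.NewNoopLimiter()` plays the same role for `opsLimiter`. A sketch of the effect on call sites, assuming a trimmed-down writer interface:

```go
package main

import "fmt"

// metricsWriter is a trimmed stand-in for the shard's MetricsWriter.
type metricsWriter interface {
	AddToPayloadSize(v int64)
}

// noopMetrics discards every update; installing it by default means call
// sites never have to nil-check the metrics sink.
type noopMetrics struct{}

func (noopMetrics) AddToPayloadSize(int64) {}

type realMetrics struct{ payload int64 }

func (m *realMetrics) AddToPayloadSize(v int64) { m.payload += v }

type shard struct{ metrics metricsWriter }

func newShard(opts ...func(*shard)) *shard {
	s := &shard{metrics: noopMetrics{}} // default: null object, never nil
	for _, o := range opts {
		o(s)
	}
	return s
}

func (s *shard) addToPayloadSize(size int64) {
	if size != 0 { // only the business condition remains
		s.metrics.AddToPayloadSize(size)
	}
}

func main() {
	m := &realMetrics{}
	s := newShard(func(s *shard) { s.metrics = m })
	s.addToPayloadSize(42)
	newShard().addToPayloadSize(7) // safe without any nil checks
	fmt.Println(m.payload)         // 42
}
```
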
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index 73ba2e82b..84be71c4d 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -60,7 +60,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -89,8 +90,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
WithWriteCache(enableWriteCache),
WithWriteCacheOptions(o.wcOpts),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index de00eabd1..b1232707f 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -52,10 +52,10 @@ func TestWriteCacheObjectLoss(t *testing.T) {
})
}
require.NoError(t, errG.Wait())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
- defer func() { require.NoError(t, sh.Close()) }()
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
var getPrm GetPrm
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index 26dc8ec1e..db361a8bd 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeMove(ctx, d, treeID, m)
}
@@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
}
@@ -103,9 +113,46 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
}
+// TreeApplyBatch implements the pilorama.Forest interface.
+func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+ return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m)
+}
+
// TreeGetByPath implements the pilorama.Forest interface.
func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath",
@@ -130,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
}
@@ -155,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
if s.info.Mode.NoMetabase() {
return pilorama.Meta{}, 0, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return pilorama.Meta{}, 0, err
+ }
+ defer release()
return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
}
@@ -180,11 +237,16 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
}
// TreeSortedByFilename implements the pilorama.Forest interface.
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) {
+func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
@@ -204,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID
if s.info.Mode.NoMetabase() {
return nil, last, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, last, err
+ }
+ defer release()
return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
}
@@ -229,6 +296,11 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return pilorama.Move{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return pilorama.Move{}, err
+ }
+ defer release()
return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
}
@@ -253,6 +325,11 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeDrop(ctx, cid, treeID)
}
@@ -276,6 +353,11 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeList(ctx, cid)
}
@@ -299,6 +381,11 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
return s.pilorama.TreeHeight(ctx, cid, treeID)
}
@@ -323,6 +410,11 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b
if s.info.Mode.NoMetabase() {
return false, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer release()
return s.pilorama.TreeExists(ctx, cid, treeID)
}
@@ -351,6 +443,11 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
}
@@ -375,6 +472,11 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st
if s.info.Mode.NoMetabase() {
return 0, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
}
@@ -396,6 +498,11 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
return s.pilorama.TreeListTrees(ctx, prm)
}
@@ -425,5 +532,10 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
}
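
Each `Tree*` method repeats the same four-line preamble: acquire a read or write quota, bail on error, defer the release. The diff inlines the guard everywhere; a generic helper such as the hypothetical `withQuota` below (not part of the change) is one way the repetition could be factored out:

```go
package main

import (
	"context"
	"fmt"
)

type ReleaseFunc func()

// withQuota is a hypothetical helper, not in the diff: it wraps an operation
// in the acquire/defer-release guard that every Tree* method repeats.
func withQuota[T any](
	ctx context.Context,
	acquire func(context.Context) (ReleaseFunc, error),
	op func(context.Context) (T, error),
) (T, error) {
	var zero T
	release, err := acquire(ctx)
	if err != nil {
		return zero, err
	}
	defer release()
	return op(ctx)
}

func main() {
	grant := func(context.Context) (ReleaseFunc, error) { return func() {}, nil }
	height, err := withQuota(context.Background(), grant,
		func(context.Context) (uint64, error) { return 17, nil })
	fmt.Println(height, err) // 17 <nil>
}
```
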
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index a6de07f03..9edb89df8 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -67,6 +67,12 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
return ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
}
@@ -124,12 +130,19 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
close(started)
defer cleanup()
- s.log.Info(logs.StartedWritecacheSealAsync)
- if err := s.writeCache.Seal(ctx, prm); err != nil {
- s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err))
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
return
}
- s.log.Info(logs.WritecacheSealCompletedAsync)
+ defer release()
+
+ s.log.Info(ctx, logs.StartedWritecacheSealAsync)
+ if err := s.writeCache.Seal(ctx, prm); err != nil {
+ s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
+ return
+ }
+ s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
}()
select {
case <-ctx.Done():
@@ -138,5 +151,11 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
return nil
}
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
return s.writeCache.Seal(ctx, prm)
}
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index 79ab7d9c6..fd85b4501 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -43,12 +43,12 @@ func BenchmarkWriteAfterDelete(b *testing.B) {
b.SetParallelism(parallel)
benchmarkRunPar(b, cache, payloadSize)
})
- require.NoError(b, cache.Close())
+ require.NoError(b, cache.Close(context.Background()))
}
func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close()) }()
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
ctx := context.Background()
objGen := testutil.RandObjGenerator{ObjSize: size}
@@ -71,7 +71,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close()) }()
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
benchmarkRunPar(b, cache, size)
}
@@ -100,7 +100,7 @@ func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
- require.NoError(b, cache.Init(), "initializing")
+ require.NoError(b, cache.Init(context.Background()), "initializing")
}
type testMetabase struct{}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index b97fc5856..ee709ea73 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -6,6 +6,7 @@ import (
"sync"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -55,12 +56,13 @@ func New(opts ...Option) Cache {
counter: fstree.NewSimpleCounter(),
options: options{
- log: &logger.Logger{Logger: zap.NewNop()},
+ log: logger.NewLoggerWrapper(zap.NewNop()),
maxObjectSize: defaultMaxObjectSize,
workersCount: defaultFlushWorkersCount,
maxCacheSize: defaultMaxCacheSize,
metrics: DefaultMetrics(),
flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
+ qosLimiter: qos.NewNoopLimiter(),
},
}
@@ -94,23 +96,24 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error {
if err != nil {
return metaerr.Wrap(err)
}
- return metaerr.Wrap(c.initCounters())
+ c.initCounters()
+ return nil
}
// Init runs necessary services.
-func (c *cache) Init() error {
+func (c *cache) Init(ctx context.Context) error {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
- if err := c.flushAndDropBBoltDB(context.Background()); err != nil {
+ if err := c.flushAndDropBBoltDB(ctx); err != nil {
return fmt.Errorf("flush previous version write-cache database: %w", err)
}
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache
c.cancel.Store(cancel)
c.runFlushLoop(ctx)
return nil
}
// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
-func (c *cache) Close() error {
+func (c *cache) Close(ctx context.Context) error {
if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil {
cancelValue.(context.CancelFunc)()
}
@@ -127,7 +130,7 @@ func (c *cache) Close() error {
var err error
if c.fsTree != nil {
- err = c.fsTree.Close()
+ err = c.fsTree.Close(ctx)
if err != nil {
c.fsTree = nil
}
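
`Init` now derives the flush-loop context via `context.WithoutCancel(ctx)`: the loop keeps the caller's values (tracing metadata, IO tags) but not its cancellation, so only the cache's own stored cancel func, invoked from `Close`, stops it. A small sketch of the detach-but-keep-values behavior:

```go
package main

import (
	"context"
	"fmt"
)

type tagKey struct{}

func main() {
	parent, cancel := context.WithCancel(
		context.WithValue(context.Background(), tagKey{}, "writecache"))

	// Detach: values survive, cancellation does not.
	loopCtx, stop := context.WithCancel(context.WithoutCancel(parent))
	defer stop() // in the diff this cancel func is stored and called by Close

	cancel() // the caller goes away...

	fmt.Println(parent.Err())            // context.Canceled
	fmt.Println(loopCtx.Err())           // <nil> — the flush loop keeps running
	fmt.Println(loopCtx.Value(tagKey{})) // writecache — values still visible
}
```
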
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index dda284439..94a0a40db 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -46,7 +46,7 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
storageType = StorageTypeFSTree
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
- storagelog.Write(c.log,
+ storagelog.Write(ctx, c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index bfa6aacb0..893d27ba2 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -6,6 +6,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -14,6 +15,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -35,6 +37,7 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String())
fl := newFlushLimiter(c.flushSizeLimit)
c.wg.Add(1)
go func() {
@@ -64,7 +67,13 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
continue
}
- err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ release, err := c.qosLimiter.ReadRequest(ctx)
+ if err != nil {
+ c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err))
+ c.modeMtx.RUnlock()
+ continue
+ }
+ err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
if err := fl.acquire(oi.DataSize); err != nil {
return err
}
@@ -79,11 +88,15 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
return ctx.Err()
}
})
+ release()
if err != nil {
- c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+ c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
}
c.modeMtx.RUnlock()
+
+ // counter changed by fstree
+ c.estimateCacheSize()
case <-ctx.Done():
return
}
@@ -107,12 +120,18 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
defer fl.release(objInfo.size)
+ release, err := c.qosLimiter.WriteRequest(ctx)
+ if err != nil {
+ c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err))
+ return
+ }
+ defer release()
res, err := c.fsTree.Get(ctx, common.GetPrm{
Address: objInfo.addr,
})
if err != nil {
if !client.IsErrObjectNotFound(err) {
- c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
}
return
}
@@ -126,11 +145,11 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
}
-func (c *cache) reportFlushError(msg string, addr string, err error) {
+func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
if c.reportError != nil {
- c.reportError(msg, err)
+ c.reportError(ctx, msg, err)
} else {
- c.log.Error(msg,
+ c.log.Error(ctx, msg,
zap.String("address", addr),
zap.Error(err))
}
@@ -145,7 +164,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var obj objectSDK.Object
err := obj.Unmarshal(e.ObjectData)
if err != nil {
- c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
+ c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
if ignoreErrors {
return nil
}
@@ -183,7 +202,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
- c.reportFlushError(logs.FSTreeCantFushObjectBlobstor,
+ c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor,
addr.EncodeToString(), err)
}
return err
@@ -195,7 +214,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
_, err = c.metabase.UpdateStorageID(ctx, updPrm)
if err != nil {
- c.reportFlushError(logs.FSTreeCantUpdateID,
+ c.reportFlushError(ctx, logs.FSTreeCantUpdateID,
addr.EncodeToString(), err)
}
return err
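
The background flush now does two QoS things: it tags its context once with the write-cache IO tag so lower layers can classify the traffic, and it takes a read quota around each `IterateInfo` pass, releasing it manually because a `defer` inside the endless loop would never run. A sketch of that shape, with the tagging API modeled as a plain context value:

```go
package main

import (
	"context"
	"fmt"
)

type ioTagKey struct{}

// withIOTag stands in for tagging.ContextWithIOTag from the diff.
func withIOTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, ioTagKey{}, tag)
}

type ReleaseFunc func()

func readQuota(context.Context) (ReleaseFunc, error) { return func() {}, nil }

func flushLoop(ctx context.Context, rounds int) {
	ctx = withIOTag(ctx, "writecache") // classify all flush traffic once

	for i := 0; i < rounds; i++ {
		release, err := readQuota(ctx)
		if err != nil {
			continue // skip this round; try again on the next tick
		}
		// ...iterate the FSTree under the quota...
		fmt.Println("iteration", i, "tag:", ctx.Value(ioTagKey{}))
		release() // manual release: defers inside a loop would accumulate
	}
}

func main() { flushLoop(context.Background(), 2) }
```
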
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 59a4e4895..7fc84657c 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -38,9 +38,9 @@ func TestFlush(t *testing.T) {
errCountOpt := func() (Option, *atomic.Uint32) {
cnt := &atomic.Uint32{}
- return WithReportErrorFunc(func(msg string, err error) {
+ return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
cnt.Add(1)
- testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
}), cnt
}
@@ -114,11 +114,11 @@ func runFlushTest[Option any](
) {
t.Run("no errors", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
require.NoError(t, wc.Flush(context.Background(), false, false))
@@ -127,15 +127,15 @@ func runFlushTest[Option any](
t.Run("flush on moving to degraded mode", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
// Blobstor is read-only, so we expect an error from `flush` here.
- require.Error(t, wc.SetMode(mode.Degraded))
+ require.Error(t, wc.SetMode(context.Background(), mode.Degraded))
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
- require.NoError(t, wc.SetMode(mode.Degraded))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, wc.SetMode(context.Background(), mode.Degraded))
check(t, mb, bs, objects)
})
@@ -145,12 +145,12 @@ func runFlushTest[Option any](
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
f.InjectFn(t, wc)
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
require.Equal(t, uint32(0), errCount.Load())
require.Error(t, wc.Flush(context.Background(), false, false))
@@ -173,7 +173,7 @@ func newCache[Option any](
meta.WithPath(filepath.Join(dir, "meta")),
meta.WithEpochState(dummyEpoch{}))
require.NoError(t, mb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.Init())
+ require.NoError(t, mb.Init(context.Background()))
bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -184,15 +184,15 @@ func newCache[Option any](
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init())
+ require.NoError(t, bs.Init(context.Background()))
wc := createCacheFn(t, mb, bs, opts...)
require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
// First set mode for metabase and blobstor to prevent background flushes.
- require.NoError(t, mb.SetMode(mode.ReadOnly))
- require.NoError(t, bs.SetMode(mode.ReadOnly))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly))
return wc, bs, mb
}
diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go
index 9ec039f91..e369fbd50 100644
--- a/pkg/local_object_storage/writecache/iterate.go
+++ b/pkg/local_object_storage/writecache/iterate.go
@@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
return b.ForEach(func(k, _ []byte) error {
err := addr.DecodeString(string(k))
if err != nil {
- return fmt.Errorf("could not parse object address: %w", err)
+ return fmt.Errorf("parse object address: %w", err)
}
return f(addr)
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
index ddc4101be..0e020b36e 100644
--- a/pkg/local_object_storage/writecache/limiter.go
+++ b/pkg/local_object_storage/writecache/limiter.go
@@ -3,6 +3,8 @@ package writecache
import (
"errors"
"sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
var errLimiterClosed = errors.New("acquire failed: limiter closed")
@@ -45,17 +47,11 @@ func (l *flushLimiter) release(size uint64) {
l.cond.L.Lock()
defer l.cond.L.Unlock()
- if l.size >= size {
- l.size -= size
- } else {
- panic("flushLimiter: invalid size")
- }
+ assert.True(l.size >= size, "flushLimiter: invalid size")
+ l.size -= size
- if l.count > 0 {
- l.count--
- } else {
- panic("flushLimiter: invalid count")
- }
+ assert.True(l.count > 0, "flushLimiter: invalid count")
+ l.count--
l.cond.Broadcast()
}
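
The panic branches in `release` collapse into `assert.True` calls: each invariant reads as one line, and the mutation follows unconditionally. A sketch assuming the repository's `internal/assert` package is essentially a conditional panic (locking omitted for brevity):

```go
package main

import "fmt"

// True panics with msg when the invariant does not hold — a plausible shape
// for the internal/assert helper used by the diff.
func True(cond bool, msg string) {
	if !cond {
		panic(msg)
	}
}

type flushLimiter struct {
	size  uint64
	count uint64
}

func (l *flushLimiter) release(size uint64) {
	True(l.size >= size, "flushLimiter: invalid size")
	l.size -= size

	True(l.count > 0, "flushLimiter: invalid count")
	l.count--
}

func main() {
	l := &flushLimiter{size: 10, count: 1}
	l.release(10)
	fmt.Println(l.size, l.count) // 0 0
}
```
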
diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go
index db99b203a..1ca3e1156 100644
--- a/pkg/local_object_storage/writecache/limiter_test.go
+++ b/pkg/local_object_storage/writecache/limiter_test.go
@@ -14,7 +14,7 @@ func TestLimiter(t *testing.T) {
l := newFlushLimiter(uint64(maxSize))
var currSize atomic.Int64
var eg errgroup.Group
- for i := 0; i < 10_000; i++ {
+ for range 10_000 {
eg.Go(func() error {
defer l.release(single)
defer currSize.Add(-1)
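
The test loop moves to range-over-int, available since Go 1.22 and therefore safe under the 1.23/1.24 toolchains this change targets: `for range n` runs the body n times with no unused index variable. For example:

```go
package main

import "fmt"

func main() {
	sum := 0
	for range 10_000 { // Go 1.22+: iterate n times, no index variable
		sum++
	}
	fmt.Println(sum) // 10000

	for i := range 3 { // the indexed form is also available
		fmt.Println(i) // 0, 1, 2
	}
}
```
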
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index d12dd603b..c491be60b 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -23,8 +23,8 @@ type setModePrm struct {
// SetMode sets write-cache mode of operation.
// When shard is put in read-only mode all objects in memory are flushed to disk
// and all background jobs are suspended.
-func (c *cache) SetMode(m mode.Mode) error {
- ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode",
+func (c *cache) SetMode(ctx context.Context, m mode.Mode) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode",
trace.WithAttributes(
attribute.String("mode", m.String()),
))
@@ -60,7 +60,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
// guarantees that there are no in-fly operations.
for len(c.flushCh) != 0 {
- c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
+ c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush)
time.Sleep(time.Second)
}
@@ -82,8 +82,8 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
return nil
}
if !shrink {
- if err := c.fsTree.Close(); err != nil {
- return fmt.Errorf("can't close write-cache storage: %w", err)
+ if err := c.fsTree.Close(ctx); err != nil {
+ return fmt.Errorf("close write-cache storage: %w", err)
}
return nil
}
@@ -98,19 +98,19 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
if errors.Is(err, errIterationCompleted) {
empty = false
} else {
- return fmt.Errorf("failed to check write-cache items: %w", err)
+ return fmt.Errorf("check write-cache items: %w", err)
}
}
- if err := c.fsTree.Close(); err != nil {
- return fmt.Errorf("can't close write-cache storage: %w", err)
+ if err := c.fsTree.Close(ctx); err != nil {
+ return fmt.Errorf("close write-cache storage: %w", err)
}
if empty {
err := os.RemoveAll(c.path)
if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("failed to remove write-cache files: %w", err)
+ return fmt.Errorf("remove write-cache files: %w", err)
}
} else {
- c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
+ c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
}
return nil
}
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
index 70cfe8382..4fbadbc64 100644
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -18,13 +18,13 @@ func TestMode(t *testing.T) {
require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close())
+ require.NoError(t, wc.Close(context.Background()))
require.NoError(t, wc.Open(context.Background(), mode.Degraded))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close())
+ require.NoError(t, wc.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 66ac7805c..a4f98ad06 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,8 +1,10 @@
package writecache
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
// Option represents write-cache configuration option.
@@ -29,19 +31,21 @@ type options struct {
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
- reportError func(string, error)
+ reportError func(context.Context, string, error)
// metrics is metrics implementation
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
disableBackgroundFlush bool
// flushSizeLimit is total size of flushing objects.
flushSizeLimit uint64
+ // qosLimiter used to limit flush RPS.
+ qosLimiter qos.Limiter
}
// WithLogger sets logger.
func WithLogger(log *logger.Logger) Option {
return func(o *options) {
- o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
+ o.log = log
}
}
@@ -108,7 +112,7 @@ func WithNoSync(noSync bool) Option {
}
// WithReportErrorFunc sets error reporting function.
-func WithReportErrorFunc(f func(string, error)) Option {
+func WithReportErrorFunc(f func(context.Context, string, error)) Option {
return func(o *options) {
o.reportError = f
}
@@ -134,3 +138,9 @@ func WithFlushSizeLimit(v uint64) Option {
o.flushSizeLimit = v
}
}
+
+func WithQoSLimiter(l qos.Limiter) Option {
+ return func(o *options) {
+ o.qosLimiter = l
+ }
+}
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index c53067bea..2fbf50913 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -2,6 +2,7 @@ package writecache
import (
"context"
+ "fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -59,7 +60,15 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
// putBig writes object to FSTree and pushes it to the flush workers queue.
func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
- if !c.hasEnoughSpaceFS() {
+ if prm.RawData == nil { // foolproof: RawData should be marshalled by shard.
+ data, err := prm.Object.Marshal()
+ if err != nil {
+ return fmt.Errorf("cannot marshal object: %w", err)
+ }
+ prm.RawData = data
+ }
+ size := uint64(len(prm.RawData))
+ if !c.hasEnoughSpace(size) {
return ErrOutOfSpace
}
@@ -68,7 +77,7 @@ func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
return err
}
- storagelog.Write(c.log,
+ storagelog.Write(ctx, c.log,
storagelog.AddressField(prm.Address.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree PUT"),
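
`putBig` used to check free space against the worst-case `maxObjectSize`; it now marshals the object itself if the shard did not, and checks against the actual encoded size. A simplified sketch of that defensive shape (the space accounting here is reduced to a single free-space number):

```go
package main

import (
	"errors"
	"fmt"
)

var errOutOfSpace = errors.New("out of space")

type putPrm struct {
	rawData []byte
	obj     interface{ Marshal() ([]byte, error) }
}

func putBig(prm putPrm, freeSpace uint64) error {
	if prm.rawData == nil { // foolproof: callers should marshal upstream
		data, err := prm.obj.Marshal()
		if err != nil {
			return fmt.Errorf("marshal object: %w", err)
		}
		prm.rawData = data
	}
	if size := uint64(len(prm.rawData)); size > freeSpace {
		return errOutOfSpace // exact size check, not worst-case maxObjectSize
	}
	// ...write prm.rawData to the FSTree...
	return nil
}

type fakeObj struct{}

func (fakeObj) Marshal() ([]byte, error) { return []byte("payload"), nil }

func main() {
	fmt.Println(putBig(putPrm{obj: fakeObj{}}, 4))  // out of space
	fmt.Println(putBig(putPrm{obj: fakeObj{}}, 64)) // <nil>
}
```
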
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index 835686fbb..7a52d3672 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -7,10 +7,6 @@ func (c *cache) estimateCacheSize() (uint64, uint64) {
return count, size
}
-func (c *cache) hasEnoughSpaceFS() bool {
- return c.hasEnoughSpace(c.maxObjectSize)
-}
-
func (c *cache) hasEnoughSpace(objectSize uint64) bool {
count, size := c.estimateCacheSize()
if c.maxCacheCount > 0 && count+1 > c.maxCacheCount {
@@ -19,7 +15,6 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
return c.maxCacheSize >= size+objectSize
}
-func (c *cache) initCounters() error {
+func (c *cache) initCounters() {
c.estimateCacheSize()
- return nil
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 2e52e5b20..e88566cdf 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
fstree.WithFileCounter(c.counter),
)
if err := c.fsTree.Open(mod); err != nil {
- return fmt.Errorf("could not open FSTree: %w", err)
+ return fmt.Errorf("open FSTree: %w", err)
}
if err := c.fsTree.Init(); err != nil {
- return fmt.Errorf("could not init FSTree: %w", err)
+ return fmt.Errorf("init FSTree: %w", err)
}
return nil
@@ -43,9 +43,9 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
if err != nil && !client.IsErrObjectNotFound(err) {
- c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
+ c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
} else if err == nil {
- storagelog.Write(c.log,
+ storagelog.Write(ctx, c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
index 3a100f1a3..5eb341ba4 100644
--- a/pkg/local_object_storage/writecache/upgrade.go
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
return nil
}
if err != nil {
- return fmt.Errorf("could not check write-cache database existence: %w", err)
+ return fmt.Errorf("check write-cache database existence: %w", err)
}
db, err := OpenDB(c.path, true, os.OpenFile)
if err != nil {
- return fmt.Errorf("could not open write-cache database: %w", err)
+ return fmt.Errorf("open write-cache database: %w", err)
}
defer func() {
_ = db.Close()
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index a973df604..7ed511318 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -38,21 +38,21 @@ type Cache interface {
// Returns ErrReadOnly if the Cache is currently in the read-only mode.
Delete(context.Context, oid.Address) error
Put(context.Context, common.PutPrm) (common.PutRes, error)
- SetMode(mode.Mode) error
+ SetMode(context.Context, mode.Mode) error
SetLogger(*logger.Logger)
DumpInfo() Info
Flush(context.Context, bool, bool) error
Seal(context.Context, SealPrm) error
- Init() error
+ Init(context.Context) error
Open(ctx context.Context, mode mode.Mode) error
- Close() error
+ Close(context.Context) error
GetMetrics() Metrics
}
// MainStorage is the interface of the underlying storage of Cache implementations.
type MainStorage interface {
- Compressor() *compression.Config
+ Compressor() *compression.Compressor
Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error)
Put(context.Context, common.PutPrm) (common.PutRes, error)
}
diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go
index b6718dea5..2849f3052 100644
--- a/pkg/morph/client/actor.go
+++ b/pkg/morph/client/actor.go
@@ -16,7 +16,7 @@ type actorProvider interface {
GetRPCActor() actor.RPCActor
}
-// Client switches an established connection with neo-go if it is broken.
+// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken.
// This leads to an invalidation of an rpc actor within Client. That means the
// components that are initialized with the rpc actor may unintentionally use
// it when it is already invalidated. SwitchRPCGuardedActor is used to prevent
diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go
index aae245acd..4462daab4 100644
--- a/pkg/morph/client/balance/balanceOf.go
+++ b/pkg/morph/client/balance/balanceOf.go
@@ -1,36 +1,33 @@
package balance
import (
+ "context"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// BalanceOf receives the amount of funds in the client's account
// through the Balance contract call, and returns it.
-func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {
- h, err := address.StringToUint160(id.EncodeToString())
- if err != nil {
- return nil, err
- }
+func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) {
+ h := id.ScriptHash()
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(balanceOfMethod)
invokePrm.SetArgs(h)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err)
} else if ln := len(prms); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln)
}
amount, err := client.BigIntFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
}
return amount, nil
}
diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go
index 4befbef45..f4685b0ab 100644
--- a/pkg/morph/client/balance/burn.go
+++ b/pkg/morph/client/balance/burn.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,12 +32,12 @@ func (b *BurnPrm) SetID(id []byte) {
}
// Burn destroys funds from the account.
-func (c *Client) Burn(p BurnPrm) error {
+func (c *Client) Burn(ctx context.Context, p BurnPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(burnMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go
index b05c526dc..1dacb9574 100644
--- a/pkg/morph/client/balance/client.go
+++ b/pkg/morph/client/balance/client.go
@@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("could not create static client of Balance contract: %w", err)
+ return nil, fmt.Errorf("create 'balance' contract client: %w", err)
}
return &Client{
@@ -54,15 +54,7 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go
index 39e4b28e5..57e61d62b 100644
--- a/pkg/morph/client/balance/decimals.go
+++ b/pkg/morph/client/balance/decimals.go
@@ -1,6 +1,7 @@
package balance
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,20 +9,20 @@ import (
// Decimals receives the decimal precision of currency transactions
// through the Balance contract call, and returns it.
-func (c *Client) Decimals() (uint32, error) {
+func (c *Client) Decimals(ctx context.Context) (uint32, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(decimalsMethod)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err)
} else if ln := len(prms); ln != 1 {
return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln)
}
decimals, err := client.IntFromStackItem(prms[0])
if err != nil {
- return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
}
return uint32(decimals), nil
}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
index a5b206799..83e8b0586 100644
--- a/pkg/morph/client/balance/lock.go
+++ b/pkg/morph/client/balance/lock.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -42,12 +44,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
}
// Lock locks fund on the user account.
-func (c *Client) Lock(p LockPrm) error {
+func (c *Client) Lock(ctx context.Context, p LockPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(lockMethod)
prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go
index 73448da31..082ade85e 100644
--- a/pkg/morph/client/balance/mint.go
+++ b/pkg/morph/client/balance/mint.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,12 +32,12 @@ func (m *MintPrm) SetID(id []byte) {
}
// Mint sends funds to the account.
-func (c *Client) Mint(p MintPrm) error {
+func (c *Client) Mint(ctx context.Context, p MintPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(mintMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go
index 08fb05289..870bed166 100644
--- a/pkg/morph/client/balance/transfer.go
+++ b/pkg/morph/client/balance/transfer.go
@@ -1,11 +1,11 @@
package balance
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// TransferPrm groups parameters of TransferX method.
@@ -21,27 +21,18 @@ type TransferPrm struct {
// TransferX transfers p.Amount of GASe-12 from p.From to p.To
// with details p.Details through direct smart contract call.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) TransferX(p TransferPrm) error {
- from, err := address.StringToUint160(p.From.EncodeToString())
- if err != nil {
- return err
- }
-
- to, err := address.StringToUint160(p.To.EncodeToString())
- if err != nil {
- return err
- }
+func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
+ from := p.From.ScriptHash()
+ to := p.To.ScriptHash()
prm := client.InvokePrm{}
prm.SetMethod(transferXMethod)
prm.SetArgs(from, to, p.Amount, p.Details)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err = c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
+ return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
}
return nil
}
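Besides gaining a context, TransferX now takes script hashes straight from user.ID via ScriptHash(), which cannot fail, instead of the old encode-to-address/parse round trip with its two error paths. A hedged caller sketch (field names follow the TransferPrm usage above; the amount is illustrative):

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// transferFunds is a sketch; Amount is expressed in GASe-12 units,
// matching the method comment above.
func transferFunds(ctx context.Context, c *balance.Client, from, to user.ID) error {
	prm := balance.TransferPrm{
		Amount:  1_000,
		From:    from,
		To:      to,
		Details: []byte("example transfer"),
	}
	return c.TransferX(ctx, prm)
}
```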
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index df521f56b..aab058d27 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -9,6 +9,7 @@ import (
"sync/atomic"
"time"
+ nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
@@ -19,6 +20,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
@@ -59,6 +61,9 @@ type Client struct {
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
+ nnsHash util.Uint160 // NNS contract hash
+
+ nnsReader *nnsClient.ContractReader // NNS contract wrapper
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
@@ -93,27 +98,12 @@ type Client struct {
type cache struct {
m sync.RWMutex
- nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
metrics metrics.MorphCacheMetrics
}
-func (c *cache) nns() *util.Uint160 {
- c.m.RLock()
- defer c.m.RUnlock()
-
- return c.nnsHash
-}
-
-func (c *cache) setNNSHash(nnsHash util.Uint160) {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.nnsHash = &nnsHash
-}
-
func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
defer c.m.RUnlock()
@@ -132,7 +122,6 @@ func (c *cache) invalidate() {
c.m.Lock()
defer c.m.Unlock()
- c.nnsHash = nil
c.gKey = nil
c.txHeights.Purge()
}
@@ -162,24 +151,10 @@ func (e *notHaltStateError) Error() string {
)
}
-// implementation of error interface for FrostFS-specific errors.
-type frostfsError struct {
- err error
-}
-
-func (e frostfsError) Error() string {
- return fmt.Sprintf("frostfs error: %v", e.err)
-}
-
-// wraps FrostFS-specific error into frostfsError. Arg must not be nil.
-func wrapFrostFSError(err error) error {
- return frostfsError{err}
-}
-
// Invoke invokes contract method by sending transaction into blockchain.
// Returns the transaction hash and its valid-until-block value.
// Supported argument types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
+func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) {
start := time.Now()
success := false
defer func() {
@@ -190,29 +165,29 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
defer c.switchLock.RUnlock()
if c.inactive {
- return 0, ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
if err != nil {
- return 0, fmt.Errorf("could not invoke %s: %w", method, err)
+ return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
}
- c.logger.Debug(logs.ClientNeoClientInvoke,
+ c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
success = true
- return vub, nil
+ return InvokeRes{Hash: txHash, VUB: vub}, nil
}
// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
// If cb returns an error, the session is closed and this error is returned as-is.
-// If the remove neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
+// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
// The default batchSize is 100, the default limit from neo-go.
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error {
+func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
start := time.Now()
success := false
defer func() {
@@ -239,7 +214,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
if err != nil {
return err
} else if val.State != HaltState {
- return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
+ return ¬HaltStateError{state: val.State, exception: val.FaultException}
}
arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
@@ -261,10 +236,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
}()
// Batch size for TraverseIterator() can be restricted on the server side.
- traverseBatchSize := batchSize
- if invoker.DefaultIteratorResultItems < traverseBatchSize {
- traverseBatchSize = invoker.DefaultIteratorResultItems
- }
+ traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
for {
items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
if err != nil {
@@ -306,7 +278,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (
}
if val.State != HaltState {
- return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
+ return nil, ¬HaltStateError{state: val.State, exception: val.FaultException}
}
success = true
@@ -327,7 +299,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
- c.logger.Debug(logs.ClientNativeGasTransferInvoke,
+ c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -361,7 +333,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
- c.logger.Debug(logs.ClientBatchGasTransferInvoke,
+ c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -388,8 +360,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(logs.ClientCantGetBlockchainHeight,
- zap.String("error", err.Error()))
+ c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
+ zap.Error(err))
return nil
}
@@ -402,8 +374,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(logs.ClientCantGetBlockchainHeight243,
- zap.String("error", err.Error()))
+ c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
+ zap.Error(err))
return nil
}
@@ -461,6 +433,28 @@ func (c *Client) TxHalt(h util.Uint256) (res bool, err error) {
return len(aer.Executions) > 0 && aer.Executions[0].VMState.HasFlag(vmstate.Halt), nil
}
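+// GetApplicationLog returns the application log of the given transaction,
+// optionally filtered by trigger type, refusing requests on an inactive connection.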
+func (c *Client) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return nil, ErrConnectionLost
+ }
+
+ return c.client.GetApplicationLog(hash, trig)
+}
+
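+// GetVersion returns the version information reported by the remote RPC node.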
+func (c *Client) GetVersion() (*result.Version, error) {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return nil, ErrConnectionLost
+ }
+
+ return c.client.GetVersion()
+}
+
// TxHeight returns true if transaction has been successfully executed and persisted.
func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
c.switchLock.RLock()
@@ -476,7 +470,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
// NeoFSAlphabetList returns keys that are stored in the NeoFS Alphabet role. The main chain
// stores alphabet node keys of the inner ring there, while the sidechain stores both
// alphabet and non-alphabet node keys of the inner ring.
-func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
+func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -486,7 +480,7 @@ func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
list, err := c.roleList(noderoles.NeoFSAlphabet)
if err != nil {
- return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
+ return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
}
return list, nil
@@ -500,7 +494,7 @@ func (c *Client) GetDesignateHash() util.Uint160 {
func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
height, err := c.rpcActor.GetBlockCount()
if err != nil {
- return nil, fmt.Errorf("can't get chain height: %w", err)
+ return nil, fmt.Errorf("get chain height: %w", err)
}
return c.rolemgmt.GetDesignatedByRole(r, height)
@@ -571,6 +565,7 @@ func (c *Client) setActor(act *actor.Actor) {
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
+ c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
}
func (c *Client) GetActor() *actor.Actor {
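Invoke now reports both the transaction hash and the valid-until-block value via InvokeRes, and the new GetApplicationLog accessor lets callers follow up on that hash without reaching into the inner client. A hedged sketch of the combined flow ("someMethod" and its argument are made up for illustration):

```go
package example

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// invokeAndInspect is a sketch assuming InvokeRes carries the Hash and VUB
// fields set in the hunk above.
func invokeAndInspect(ctx context.Context, c *client.Client, contract util.Uint160, fee fixedn.Fixed8) error {
	res, err := c.Invoke(ctx, contract, fee, "someMethod", int64(42))
	if err != nil {
		return err
	}

	// The returned hash feeds straight into the new accessor;
	// a nil trigger type applies no filter.
	appLog, err := c.GetApplicationLog(res.Hash, nil)
	if err != nil {
		return err
	}
	fmt.Println("executions:", len(appLog.Executions), "valid until block:", res.VUB)
	return nil
}
```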
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index 78cb3e82f..e4dcd0db7 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "net"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -41,13 +42,13 @@ type cfg struct {
endpoints []Endpoint
- singleCli *rpcclient.WSClient // neo-go client for single client mode
-
inactiveModeCb Callback
switchInterval time.Duration
morphCacheMetrics metrics.MorphCacheMetrics
+
+ dialerSource DialerSource
}
const (
@@ -60,13 +61,14 @@ var ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
func defaultConfig() *cfg {
return &cfg{
dialTimeout: defaultDialTimeout,
- logger: &logger.Logger{Logger: zap.L()},
+ logger: logger.NewLoggerWrapper(zap.L()),
metrics: morphmetrics.NoopRegister{},
waitInterval: defaultWaitInterval,
signer: &transaction.Signer{
Scopes: transaction.Global,
},
morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{},
+ dialerSource: &noopDialerSource{},
}
}
@@ -124,41 +126,30 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
var err error
var act *actor.Actor
- if cfg.singleCli != nil {
- // return client in single RPC node mode that uses
- // predefined WS client
- //
- // in case of the closing web socket connection:
- // if extra endpoints were provided via options,
- // they will be used in switch process, otherwise
- // inactive mode will be enabled
- cli.client = cfg.singleCli
-
- act, err = newActor(cfg.singleCli, acc, *cfg)
+ var endpoint Endpoint
+ for cli.endpoints.curr, endpoint = range cli.endpoints.list {
+ cli.client, act, err = cli.newCli(ctx, endpoint)
if err != nil {
- return nil, fmt.Errorf("could not create RPC actor: %w", err)
- }
- } else {
- var endpoint Endpoint
- for cli.endpoints.curr, endpoint = range cli.endpoints.list {
- cli.client, act, err = cli.newCli(ctx, endpoint)
- if err != nil {
- cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
- zap.Error(err), zap.String("endpoint", endpoint.Address))
- } else {
- cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint,
- zap.String("endpoint", endpoint.Address))
- if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
- cli.switchIsActive.Store(true)
- go cli.switchToMostPrioritized(ctx)
- }
- break
+ cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
+ zap.Error(err), zap.String("endpoint", endpoint.Address))
+ } else {
+ cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint,
+ zap.String("endpoint", endpoint.Address))
+ if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
+ cli.switchIsActive.Store(true)
+ go cli.switchToMostPrioritized(ctx)
}
- }
- if cli.client == nil {
- return nil, ErrNoHealthyEndpoint
+ break
}
}
+ if cli.client == nil {
+ return nil, ErrNoHealthyEndpoint
+ }
+ cs, err := cli.client.GetContractStateByID(nnsContractID)
+ if err != nil {
+ return nil, fmt.Errorf("resolve nns hash: %w", err)
+ }
+ cli.nnsHash = cs.Hash
cli.setActor(act)
go cli.closeWaiter(ctx)
@@ -175,6 +166,7 @@ func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSCl
Options: rpcclient.Options{
DialTimeout: c.cfg.dialTimeout,
TLSClientConfig: cfg,
+ NetDialContext: c.cfg.dialerSource.NetContextDialer(),
},
})
if err != nil {
@@ -285,17 +277,6 @@ func WithEndpoints(endpoints ...Endpoint) Option {
}
}
-// WithSingleClient returns a client constructor option
-// that specifies single neo-go client and forces Client
-// to use it for requests.
-//
-// Passed client must already be initialized.
-func WithSingleClient(cli *rpcclient.WSClient) Option {
- return func(c *cfg) {
- c.singleCli = cli
- }
-}
-
// WithConnLostCallback return a client constructor option
// that specifies a callback that is called when Client
// unsuccessfully tried to connect to all the specified
@@ -320,3 +301,19 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option {
c.morphCacheMetrics = morphCacheMetrics
}
}
+
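+// DialerSource provides a dial function used for establishing
+// the underlying network connections of the RPC client.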
+type DialerSource interface {
+ NetContextDialer() func(context.Context, string, string) (net.Conn, error)
+}
+
+type noopDialerSource struct{}
+
+func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
+ return nil
+}
+
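+// WithDialerSource returns a client constructor option
+// that sets a custom dialer source for RPC connections.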
+func WithDialerSource(ds DialerSource) Option {
+ return func(c *cfg) {
+ c.dialerSource = ds
+ }
+}
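The noop source keeps neo-go's default dialing behavior; anything satisfying the interface can be plugged in through the new option. A minimal hypothetical implementation backed by net.Dialer (the type name timeoutDialerSource is not part of this change):

```go
package example

import (
	"context"
	"net"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

// timeoutDialerSource satisfies client.DialerSource with a per-dial timeout.
type timeoutDialerSource struct {
	d net.Dialer
}

func (s *timeoutDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		return s.d.DialContext(ctx, network, addr)
	}
}

// withCustomDialer shows the source being wired in as a constructor option.
func withCustomDialer() client.Option {
	return client.WithDialerSource(&timeoutDialerSource{
		d: net.Dialer{Timeout: 5 * time.Second},
	})
}
```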
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index 9dd3a337b..be684619b 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -27,17 +27,8 @@ const (
getMethod = "get"
listMethod = "list"
containersOfMethod = "containersOf"
- eaclMethod = "eACL"
- setEACLMethod = "setEACL"
deletionInfoMethod = "deletionInfo"
- startEstimationMethod = "startContainerEstimation"
- stopEstimationMethod = "stopContainerEstimation"
-
- putSizeMethod = "putContainerSize"
- listSizesMethod = "listContainerSizes"
- getSizeMethod = "getContainerSize"
-
// putNamedMethod is method name for container put with an alias. It is exported to provide custom fee.
putNamedMethod = "putNamed"
)
@@ -55,9 +46,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
opts[i](o)
}
- sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...)
+ sc, err := client.NewStatic(cli, contract, fee, *o...)
if err != nil {
- return nil, fmt.Errorf("can't create container static client: %w", err)
+ return nil, fmt.Errorf("create 'container' contract client: %w", err)
}
return &Client{client: sc}, nil
@@ -77,20 +68,10 @@ func (c Client) ContractAddress() util.Uint160 {
// parameter of Wrapper.
type Option func(*opts)
-type opts struct {
- staticOpts []client.StaticClientOption
-}
+type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- o.staticOpts = append(o.staticOpts, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
@@ -100,6 +81,6 @@ func TryNotary() Option {
// Considered to be used by IR nodes only.
func AsAlphabet() Option {
return func(o *opts) {
- o.staticOpts = append(o.staticOpts, client.AsAlphabet())
+ *o = append(*o, client.AsAlphabet())
}
}
diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go
index c4db0fe6e..60fb8ad7c 100644
--- a/pkg/morph/client/container/containers_of.go
+++ b/pkg/morph/client/container/containers_of.go
@@ -1,10 +1,9 @@
package container
import (
+ "context"
"errors"
- "fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -15,28 +14,37 @@ import (
// to the specified user of FrostFS system. If idUser is nil, returns the list of all containers.
//
// If remote RPC does not support neo-go session API, fallback to List() method.
-func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
- var rawID []byte
+func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) {
+ var cidList []cid.ID
+ var err error
+ cb := func(id cid.ID) error {
+ cidList = append(cidList, id)
+ return nil
+ }
+ if err = c.IterateContainersOf(ctx, idUser, cb); err != nil {
+ return nil, err
+ }
+ return cidList, nil
+}
+
+// IterateContainersOf iterates over the list of container identifiers
+// belonging to the specified user of the FrostFS system and executes
+// `cb` on each element. If idUser is nil, it is called for all containers.
+func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error {
+ var rawID []byte
if idUser != nil {
rawID = idUser.WalletBytes()
}
- var cidList []cid.ID
- cb := func(item stackitem.Item) error {
- rawID, err := client.BytesFromStackItem(item)
+ itemCb := func(item stackitem.Item) error {
+ id, err := getCIDfromStackItem(item)
if err != nil {
- return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err)
+ return err
}
-
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return fmt.Errorf("decode container ID: %w", err)
+ if err = cb(id); err != nil {
+ return err
}
-
- cidList = append(cidList, id)
return nil
}
@@ -50,13 +58,10 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
const batchSize = 512
cnrHash := c.client.ContractAddress()
- err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID)
- if err != nil {
- if errors.Is(err, unwrap.ErrNoSessionID) {
- return c.list(idUser)
- }
- return nil, err
+ err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
+ if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
+ return c.iterate(ctx, idUser, cb)
}
- return cidList, nil
+ return err
}
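Since a callback error aborts the traversal and is returned as-is, callers can stop the server-side iteration early with a sentinel, something the old slice-returning API could not express. A hedged sketch (errStop and firstContainers are local to the example):

```go
package example

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

var errStop = errors.New("stop iteration")

// firstContainers collects at most n container IDs of the given owner,
// aborting the iteration as soon as enough have been gathered.
func firstContainers(ctx context.Context, c *container.Client, owner *user.ID, n int) ([]cid.ID, error) {
	ids := make([]cid.ID, 0, n)
	err := c.IterateContainersOf(ctx, owner, func(id cid.ID) error {
		ids = append(ids, id)
		if len(ids) == n {
			return errStop
		}
		return nil
	})
	if err != nil && !errors.Is(err, errStop) {
		return nil, err
	}
	return ids, nil
}
```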
diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go
index 20351b570..09912efa5 100644
--- a/pkg/morph/client/container/delete.go
+++ b/pkg/morph/client/container/delete.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
@@ -12,7 +13,7 @@ import (
// along with signature and session token.
//
// Returns error if container ID is nil.
-func Delete(c *Client, witness core.RemovalWitness) error {
+func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
binCnr := make([]byte, sha256.Size)
witness.ContainerID.Encode(binCnr)
@@ -26,7 +27,7 @@ func Delete(c *Client, witness core.RemovalWitness) error {
prm.SetToken(tok.Marshal())
}
- _, err := c.Delete(prm)
+ _, err := c.Delete(ctx, prm)
return err
}
@@ -65,9 +66,7 @@ func (d *DeletePrm) SetKey(key []byte) {
//
// Returns valid until block and any error encountered that caused
// the removal to interrupt.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Delete(p DeletePrm) (uint32, error) {
+func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
if len(p.signature) == 0 && !p.IsControl() {
return 0, errNilArgument
}
@@ -77,9 +76,9 @@ func (c *Client) Delete(p DeletePrm) (uint32, error) {
prm.SetArgs(p.cnr, p.signature, p.key, p.token)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
+ return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
}
return res.VUB, nil
}
diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go
index dda6bf98c..90bcdd7d5 100644
--- a/pkg/morph/client/container/deletion_info.go
+++ b/pkg/morph/client/container/deletion_info.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
"strings"
@@ -14,39 +15,39 @@ import (
"github.com/mr-tron/base58"
)
-func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) {
- return DeletionInfo((*Client)(x), cnr)
+func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) {
+ return DeletionInfo(ctx, (*Client)(x), cnr)
}
type deletionInfo interface {
- DeletionInfo(cid []byte) (*containercore.DelInfo, error)
+ DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error)
}
-func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
+func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.DeletionInfo(binCnr)
+ return c.DeletionInfo(ctx, binCnr)
}
-func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
+func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(deletionInfoMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err)
}
if len(arr) != 2 {
@@ -55,17 +56,17 @@ func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
rawOwner, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err)
}
var owner user.ID
if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil {
- return nil, fmt.Errorf("could not decode container owner id (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err)
}
epoch, err := client.BigIntFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err)
}
return &containercore.DelInfo{
diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go
deleted file mode 100644
index 8e9455050..000000000
--- a/pkg/morph/client/container/eacl.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package container
-
-import (
- "crypto/sha256"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-)
-
-// GetEACL reads the extended ACL table from FrostFS system
-// through Container contract call.
-//
-// Returns apistatus.EACLNotFound if eACL table is missing in the contract.
-func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) {
- binCnr := make([]byte, sha256.Size)
- cnr.Encode(binCnr)
-
- prm := client.TestInvokePrm{}
- prm.SetMethod(eaclMethod)
- prm.SetArgs(binCnr)
-
- prms, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln)
- }
-
- arr, err := client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err)
- }
-
- if len(arr) != 4 {
- return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr))
- }
-
- rawEACL, err := client.BytesFromStackItem(arr[0])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err)
- }
-
- sig, err := client.BytesFromStackItem(arr[1])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err)
- }
-
- // Client may not return errors if the table is missing, so check this case additionally.
- // The absence of a signature in the response can be taken as an eACL absence criterion,
- // since unsigned table cannot be approved in the storage by design.
- if len(sig) == 0 {
- return nil, new(apistatus.EACLNotFound)
- }
-
- pub, err := client.BytesFromStackItem(arr[2])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err)
- }
-
- binToken, err := client.BytesFromStackItem(arr[3])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err)
- }
-
- var res container.EACL
-
- res.Value = eacl.NewTable()
- if err = res.Value.Unmarshal(rawEACL); err != nil {
- return nil, err
- }
-
- if len(binToken) > 0 {
- res.Session = new(session.Container)
-
- err = res.Session.Unmarshal(binToken)
- if err != nil {
- return nil, fmt.Errorf("could not unmarshal session token: %w", err)
- }
- }
-
- // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
- var sigV2 refs.Signature
- sigV2.SetKey(pub)
- sigV2.SetSign(sig)
- sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256)
-
- err = res.Signature.ReadFromV2(sigV2)
- return &res, err
-}
diff --git a/pkg/morph/client/container/estimations.go b/pkg/morph/client/container/estimations.go
deleted file mode 100644
index f288c63cf..000000000
--- a/pkg/morph/client/container/estimations.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package container
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-// StartEstimationPrm groups parameters of StartEstimation operation.
-type StartEstimationPrm struct {
- commonEstimationPrm
-}
-
-// StopEstimationPrm groups parameters of StopEstimation operation.
-type StopEstimationPrm struct {
- commonEstimationPrm
-}
-
-type commonEstimationPrm struct {
- epoch uint64
-
- client.InvokePrmOptional
-}
-
-// SetEpoch sets epoch.
-func (p *commonEstimationPrm) SetEpoch(epoch uint64) {
- p.epoch = epoch
-}
-
-// StartEstimation votes to produce start estimation notification.
-func (c *Client) StartEstimation(p StartEstimationPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(startEstimationMethod)
- prm.SetArgs(p.epoch)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- if _, err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", startEstimationMethod, err)
- }
- return nil
-}
-
-// StopEstimation votes to produce stop estimation notification.
-func (c *Client) StopEstimation(p StopEstimationPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(stopEstimationMethod)
- prm.SetArgs(p.epoch)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- if _, err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", stopEstimationMethod, err)
- }
- return nil
-}
diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go
index 6715f870f..8622d2cdd 100644
--- a/pkg/morph/client/container/get.go
+++ b/pkg/morph/client/container/get.go
@@ -1,14 +1,15 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -16,8 +17,8 @@ import (
type containerSource Client
-func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) {
- return Get((*Client)(x), cnr)
+func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) {
+ return Get(ctx, (*Client)(x), cnr)
}
// AsContainerSource provides container Source interface
@@ -27,15 +28,15 @@ func AsContainerSource(w *Client) containercore.Source {
}
type getContainer interface {
- Get(cid []byte) (*containercore.Container, error)
+ Get(ctx context.Context, cid []byte) (*containercore.Container, error)
}
// Get marshals container ID, and passes it to Wrapper's Get method.
-func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) {
+func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.Get(binCnr)
+ return c.Get(ctx, binCnr)
}
// Get reads the container from FrostFS system by binary identifier
@@ -43,24 +44,24 @@ func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) {
//
// If an empty slice is returned for the requested identifier,
// storage.ErrNotFound error is returned.
-func (c *Client) Get(cid []byte) (*containercore.Container, error) {
+func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(getMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err)
}
if len(arr) != 4 {
@@ -69,29 +70,29 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) {
cnrBytes, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err)
}
sigBytes, err := client.BytesFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err)
}
pub, err := client.BytesFromStackItem(arr[2])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err)
}
tokBytes, err := client.BytesFromStackItem(arr[3])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err)
}
var cnr containercore.Container
if err := cnr.Value.Unmarshal(cnrBytes); err != nil {
// use another major version if there is any
- return nil, fmt.Errorf("can't unmarshal container: %w", err)
+ return nil, fmt.Errorf("unmarshal container: %w", err)
}
if len(tokBytes) > 0 {
@@ -99,7 +100,7 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) {
err = cnr.Session.Unmarshal(tokBytes)
if err != nil {
- return nil, fmt.Errorf("could not unmarshal session token: %w", err)
+ return nil, fmt.Errorf("unmarshal session token: %w", err)
}
}
diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go
index 6fed46c1a..fc63d1beb 100644
--- a/pkg/morph/client/container/list.go
+++ b/pkg/morph/client/container/list.go
@@ -1,20 +1,22 @@
package container
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
-// list returns a list of container identifiers belonging
+// iterate iterates through a list of container identifiers belonging
// to the specified user of FrostFS system. The list is composed
// through Container contract call.
//
-// Returns the identifiers of all FrostFS containers if pointer
+// Iterates through the identifiers of all FrostFS containers if pointer
// to user identifier is nil.
-func (c *Client) list(idUser *user.ID) ([]cid.ID, error) {
+func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error {
var rawID []byte
if idUser != nil {
@@ -25,34 +27,43 @@ func (c *Client) list(idUser *user.ID) ([]cid.ID, error) {
prm.SetMethod(listMethod)
prm.SetArgs(rawID)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err)
+ return fmt.Errorf("test invoke (%s): %w", listMethod, err)
} else if ln := len(res); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
+ return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
}
res, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err)
+ return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err)
}
- cidList := make([]cid.ID, 0, len(res))
for i := range res {
- rawID, err := client.BytesFromStackItem(res[i])
+ id, err := getCIDfromStackItem(res[i])
if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err)
+ return err
}
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
+ if err = cb(id); err != nil {
+ return err
}
-
- cidList = append(cidList, id)
}
- return cidList, nil
+ return nil
+}
+
+func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) {
+ rawID, err := client.BytesFromStackItem(item)
+ if err != nil {
+ return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err)
+ }
+
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
+ }
+ return id, nil
}
diff --git a/pkg/morph/client/container/load.go b/pkg/morph/client/container/load.go
deleted file mode 100644
index 5e2c3c2c3..000000000
--- a/pkg/morph/client/container/load.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package container
-
-import (
- "fmt"
-
- v2refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-)
-
-// EstimationID is an identity of container load estimation inside Container contract.
-type EstimationID []byte
-
-// ListLoadEstimationsByEpoch returns a list of container load estimations for to the specified epoch.
-// The list is composed through Container contract call.
-func (c *Client) ListLoadEstimationsByEpoch(epoch uint64) ([]EstimationID, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(listSizesMethod)
- invokePrm.SetArgs(epoch)
-
- prms, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listSizesMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", listSizesMethod, ln)
- }
-
- prms, err = client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listSizesMethod, err)
- }
-
- res := make([]EstimationID, 0, len(prms))
- for i := range prms {
- id, err := client.BytesFromStackItem(prms[i])
- if err != nil {
- return nil, fmt.Errorf("could not get ID byte array from stack item (%s): %w", listSizesMethod, err)
- }
-
- res = append(res, id)
- }
-
- return res, nil
-}
-
-// Estimation is a structure of single container load estimation
-// reported by storage node.
-type Estimation struct {
- Size uint64
-
- Reporter []byte
-}
-
-// Estimations is a structure of grouped container load estimation inside Container contract.
-type Estimations struct {
- ContainerID cid.ID
-
- Values []Estimation
-}
-
-// GetUsedSpaceEstimations returns a list of container load estimations by ID.
-// The list is composed through Container contract call.
-func (c *Client) GetUsedSpaceEstimations(id EstimationID) (*Estimations, error) {
- prm := client.TestInvokePrm{}
- prm.SetMethod(getSizeMethod)
- prm.SetArgs([]byte(id))
-
- prms, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getSizeMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", getSizeMethod, ln)
- }
-
- prms, err = client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack items of estimation fields from stack item (%s): %w", getSizeMethod, err)
- } else if ln := len(prms); ln != 2 {
- return nil, fmt.Errorf("unexpected stack item count of estimations fields (%s)", getSizeMethod)
- }
-
- rawCnr, err := client.BytesFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get container ID byte array from stack item (%s): %w", getSizeMethod, err)
- }
-
- prms, err = client.ArrayFromStackItem(prms[1])
- if err != nil {
- return nil, fmt.Errorf("could not get estimation list array from stack item (%s): %w", getSizeMethod, err)
- }
-
- var cnr cid.ID
-
- err = cnr.Decode(rawCnr)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- v2 := new(v2refs.ContainerID)
- v2.SetValue(rawCnr)
- res := &Estimations{
- ContainerID: cnr,
- Values: make([]Estimation, 0, len(prms)),
- }
-
- for i := range prms {
- arr, err := client.ArrayFromStackItem(prms[i])
- if err != nil {
- return nil, fmt.Errorf("could not get estimation struct from stack item (%s): %w", getSizeMethod, err)
- } else if ln := len(arr); ln != 2 {
- return nil, fmt.Errorf("unexpected stack item count of estimation fields (%s)", getSizeMethod)
- }
-
- reporter, err := client.BytesFromStackItem(arr[0])
- if err != nil {
- return nil, fmt.Errorf("could not get reporter byte array from stack item (%s): %w", getSizeMethod, err)
- }
-
- sz, err := client.IntFromStackItem(arr[1])
- if err != nil {
- return nil, fmt.Errorf("could not get estimation size from stack item (%s): %w", getSizeMethod, err)
- }
-
- res.Values = append(res.Values, Estimation{
- Reporter: reporter,
- Size: uint64(sz),
- })
- }
-
- return res, nil
-}
diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go
index ee323af00..3bb84eb87 100644
--- a/pkg/morph/client/container/put.go
+++ b/pkg/morph/client/container/put.go
@@ -1,11 +1,12 @@
package container
import (
+ "context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
@@ -14,7 +15,7 @@ import (
// along with sig.Key() and sig.Sign().
//
// Returns error if container is nil.
-func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
+func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) {
data := cnr.Value.Marshal()
d := container.ReadDomain(cnr.Value)
@@ -35,7 +36,7 @@ func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
prm.SetKey(sigV2.GetKey())
prm.SetSignature(sigV2.GetSign())
- err := c.Put(prm)
+ err := c.Put(ctx, prm)
if err != nil {
return nil, err
}
@@ -93,9 +94,7 @@ func (p *PutPrm) SetZone(zone string) {
//
// Returns any error encountered that caused the saving to interrupt.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Put(p PutPrm) error {
+func (c *Client) Put(ctx context.Context, p PutPrm) error {
if len(p.sig) == 0 || len(p.key) == 0 {
return errNilArgument
}
@@ -116,9 +115,9 @@ func (c *Client) Put(p PutPrm) error {
prm.SetMethod(method)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", method, err)
+ return fmt.Errorf("invoke method (%s): %w", method, err)
}
return nil
}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
index 016b56f8f..d3eba7639 100644
--- a/pkg/morph/client/frostfs/cheque.go
+++ b/pkg/morph/client/frostfs/cheque.go
@@ -1,6 +1,8 @@
package frostfscontract
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -37,13 +39,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) {
}
// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(p ChequePrm) error {
+func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(chequeMethod)
prm.SetArgs(p.id, p.user, p.amount, p.lock)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
return err
}
@@ -66,12 +68,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
}
// AlphabetUpdate update list of alphabet nodes.
-func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error {
+func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(alphabetUpdateMethod)
prm.SetArgs(p.id, p.pubs)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go
index 571915c27..cd6a9849e 100644
--- a/pkg/morph/client/frostfs/client.go
+++ b/pkg/morph/client/frostfs/client.go
@@ -35,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err)
+ return nil, fmt.Errorf("create 'frostfs' contract client: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go
index 4c31f42de..61eb03f09 100644
--- a/pkg/morph/client/frostfsid/client.go
+++ b/pkg/morph/client/frostfsid/client.go
@@ -27,7 +27,7 @@ var _ frostfsidcore.SubjectProvider = (*Client)(nil)
func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) {
sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet())
if err != nil {
- return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err)
+ return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go
index 0852f536c..3a789672a 100644
--- a/pkg/morph/client/frostfsid/subject.go
+++ b/pkg/morph/client/frostfsid/subject.go
@@ -1,6 +1,7 @@
package frostfsid
import (
+ "context"
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
@@ -14,14 +15,14 @@ const (
methodGetSubjectExtended = "getSubjectExtended"
)
-func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
+func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubject)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err)
}
structArr, err := checkStackItem(res)
@@ -31,20 +32,20 @@ func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error)
subj, err := frostfsidclient.ParseSubject(structArr)
if err != nil {
- return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
}
-func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
+func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubjectExtended)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubjectExtended, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err)
}
structArr, err := checkStackItem(res)
@@ -54,7 +55,7 @@ func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.Subject
subj, err := frostfsidclient.ParseSubjectExtended(structArr)
if err != nil {
- return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
@@ -67,7 +68,7 @@ func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error
structArr, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get item array of container (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err)
}
return
}
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index 10ed21582..b9e39c25e 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -2,6 +2,7 @@ package client
import (
"context"
+ "slices"
"sort"
"time"
@@ -42,7 +43,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
newEndpoint := c.endpoints.list[c.endpoints.curr]
cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
- c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
+ c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
zap.String("endpoint", newEndpoint.Address),
zap.Error(err),
)
@@ -52,7 +53,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
c.cache.invalidate()
- c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
+ c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
zap.String("endpoint", newEndpoint.Address))
c.client = cli
@@ -99,8 +100,7 @@ mainLoop:
case <-t.C:
c.switchLock.RLock()
- endpointsCopy := make([]Endpoint, len(c.endpoints.list))
- copy(endpointsCopy, c.endpoints.list)
+ endpointsCopy := slices.Clone(c.endpoints.list)
currPriority := c.endpoints.list[c.endpoints.curr].Priority
highestPriority := c.endpoints.list[0].Priority
@@ -122,7 +122,7 @@ mainLoop:
cli, act, err := c.newCli(ctx, e)
if err != nil {
- c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
+ c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
@@ -147,7 +147,7 @@ mainLoop:
c.switchLock.Unlock()
- c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC,
+ c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC,
zap.String("endpoint", tryE))
return
diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go
index eafa097e9..de8afbfb5 100644
--- a/pkg/morph/client/netmap/client.go
+++ b/pkg/morph/client/netmap/client.go
@@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("can't create netmap static client: %w", err)
+ return nil, fmt.Errorf("create 'netmap' contract client: %w", err)
}
return &Client{client: sc}, nil
@@ -65,15 +65,7 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 2d19a8193..3f6aed506 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -1,7 +1,7 @@
package netmap
import (
- "errors"
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -24,75 +24,45 @@ const (
// MaxObjectSize returns the max object size configuration
// value obtained through the Netmap contract call.
-func (c *Client) MaxObjectSize() (uint64, error) {
- objectSize, err := c.readUInt64Config(MaxObjectSizeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err)
- }
-
- return objectSize, nil
+func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, MaxObjectSizeConfig)
}
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
-func (c *Client) EpochDuration() (uint64, error) {
- epochDuration, err := c.readUInt64Config(EpochDurationConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err)
- }
-
- return epochDuration, nil
+func (c *Client) EpochDuration(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, EpochDurationConfig)
}
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
-func (c *Client) ContainerFee() (uint64, error) {
- fee, err := c.readUInt64Config(ContainerFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) ContainerFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, ContainerFeeConfig)
}
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
-func (c *Client) ContainerAliasFee() (uint64, error) {
- fee, err := c.readUInt64Config(ContainerAliasFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, ContainerAliasFeeConfig)
}
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
// settings.
//
// Returns (false, nil) if config key is not found in the contract.
-func (c *Client) HomomorphicHashDisabled() (bool, error) {
- return c.readBoolConfig(HomomorphicHashingDisabledKey)
+func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) {
+ return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey)
}
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
-func (c *Client) InnerRingCandidateFee() (uint64, error) {
- fee, err := c.readUInt64Config(IrCandidateFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, IrCandidateFeeConfig)
}
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
-func (c *Client) WithdrawFee() (uint64, error) {
- fee, err := c.readUInt64Config(WithdrawFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, WithdrawFeeConfig)
}
// MaintenanceModeAllowed reads admission of "maintenance" state from the
@@ -100,34 +70,32 @@ func (c *Client) WithdrawFee() (uint64, error) {
// that storage nodes are allowed to switch their state to "maintenance".
//
// By default, maintenance state is disallowed.
-func (c *Client) MaintenanceModeAllowed() (bool, error) {
- return c.readBoolConfig(MaintenanceModeAllowedConfig)
+func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) {
+ return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig)
}
-func (c *Client) readUInt64Config(key string) (uint64, error) {
- v, err := c.config([]byte(key), IntegerAssert)
+func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) {
+ v, err := c.config(ctx, []byte(key))
+ if err != nil {
+ return 0, fmt.Errorf("read netconfig value '%s': %w", key, err)
+ }
+
+ bi, err := v.TryInteger()
if err != nil {
return 0, err
}
-
- // IntegerAssert is guaranteed to return int64 if the error is nil.
- return uint64(v.(int64)), nil
+ return bi.Uint64(), nil
}
// reads boolean value by the given key from the FrostFS network configuration
// stored in the Sidechain. Returns false if the key is not present.
-func (c *Client) readBoolConfig(key string) (bool, error) {
- v, err := c.config([]byte(key), BoolAssert)
+func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) {
+ v, err := c.config(ctx, []byte(key))
if err != nil {
- if errors.Is(err, ErrConfigNotFound) {
- return false, nil
- }
-
- return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err)
+ return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
}
- // BoolAssert is guaranteed to return bool if the error is nil.
- return v.(bool), nil
+ return v.TryBool()
}
// SetConfigPrm groups parameters of SetConfig operation.
@@ -155,13 +123,13 @@ func (s *SetConfigPrm) SetValue(value any) {
}
// SetConfig sets config field.
-func (c *Client) SetConfig(p SetConfigPrm) error {
+func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(setConfigMethod)
prm.SetArgs(p.id, p.key, p.value)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
@@ -198,14 +166,14 @@ type NetworkConfiguration struct {
}
// ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain.
-func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
+func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) {
var res NetworkConfiguration
prm := client.TestInvokePrm{}
prm.SetMethod(configListMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return res, fmt.Errorf("could not perform test invocation (%s): %w",
+ return res, fmt.Errorf("test invoke (%s): %w",
configListMethod, err)
}
@@ -276,22 +244,18 @@ func bytesToBool(val []byte) bool {
return false
}
-// ErrConfigNotFound is returned when the requested key was not found
-// in the network config (returned value is `Null`).
-var ErrConfigNotFound = errors.New("config value not found")
-
// config performs the test invoke of get config value
// method of FrostFS Netmap contract.
//
-// Returns ErrConfigNotFound if config key is not found in the contract.
+// Returns the raw stack item; a Null item means the config key is not set in the contract.
-func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) {
+func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(configMethod)
prm.SetArgs(key)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
configMethod, err)
}
@@ -300,26 +264,7 @@ func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (a
configMethod, ln)
}
- if _, ok := items[0].(stackitem.Null); ok {
- return nil, ErrConfigNotFound
- }
-
- return assert(items[0])
-}
-
-// IntegerAssert converts stack item to int64.
-func IntegerAssert(item stackitem.Item) (any, error) {
- return client.IntFromStackItem(item)
-}
-
-// StringAssert converts stack item to string.
-func StringAssert(item stackitem.Item) (any, error) {
- return client.StringFromStackItem(item)
-}
-
-// BoolAssert converts stack item to bool.
-func BoolAssert(item stackitem.Item) (any, error) {
- return client.BoolFromStackItem(item)
+ return items[0], nil
}
// iterateRecords iterates over all config records and passes them to f.
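
The config accessors now share two generic readers and thread a context.Context all the way down to the test invocation. A minimal usage sketch (not part of the patch; assumes an already constructed *netmap.Client and the import alias netmaprpc):

```go
package main

import (
	"context"
	"fmt"
	"time"

	netmaprpc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
)

// printNetConfig reads a couple of netconfig values under a deadline; the
// context is propagated down to the underlying TestInvoke call.
func printNetConfig(cli *netmaprpc.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	disabled, err := cli.HomomorphicHashDisabled(ctx)
	if err != nil {
		return fmt.Errorf("homomorphic hashing setting: %w", err)
	}

	fee, err := cli.ContainerAliasFee(ctx)
	if err != nil {
		return fmt.Errorf("container alias fee: %w", err)
	}

	fmt.Printf("hash disabled: %t, alias fee: %d\n", disabled, fee)
	return nil
}
```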
diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go
index 92d569ae2..8561329ec 100644
--- a/pkg/morph/client/netmap/epoch.go
+++ b/pkg/morph/client/netmap/epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,13 +9,13 @@ import (
// Epoch receives number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) Epoch() (uint64, error) {
+func (c *Client) Epoch(ctx context.Context) (uint64, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(epochMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w",
+ return 0, fmt.Errorf("test invoke (%s): %w",
epochMethod, err)
}
@@ -25,20 +26,20 @@ func (c *Client) Epoch() (uint64, error) {
num, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err)
+ return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err)
}
return uint64(num), nil
}
// LastEpochBlock receives block number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) LastEpochBlock() (uint32, error) {
+func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(lastEpochBlockMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w",
+ return 0, fmt.Errorf("test invoke (%s): %w",
lastEpochBlockMethod, err)
}
@@ -49,7 +50,7 @@ func (c *Client) LastEpochBlock() (uint32, error) {
block, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("could not get number from stack item (%s): %w",
+ return 0, fmt.Errorf("get number from stack item (%s): %w",
lastEpochBlockMethod, err)
}
return uint32(block), nil
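
The epoch getters follow the same pattern: with a context in the signature, caller deadlines and tracing spans now cover the contract call. Sketch, reusing the imports and the assumed `cli` from the previous snippet:

```go
func currentEpochInfo(ctx context.Context, cli *netmaprpc.Client) error {
	epoch, err := cli.Epoch(ctx)
	if err != nil {
		return fmt.Errorf("epoch: %w", err)
	}

	block, err := cli.LastEpochBlock(ctx)
	if err != nil {
		return fmt.Errorf("last epoch block: %w", err)
	}

	fmt.Printf("epoch %d started at block %d\n", epoch, block)
	return nil
}
```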
diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go
index d6f8c56b2..0e1f9186b 100644
--- a/pkg/morph/client/netmap/innerring.go
+++ b/pkg/morph/client/netmap/innerring.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"crypto/elliptic"
"fmt"
@@ -23,7 +24,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) {
}
// UpdateInnerRing updates inner ring keys.
-func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
+func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
args := make([][]byte, len(p.keys))
for i := range args {
args[i] = p.keys[i].Bytes()
@@ -34,18 +35,18 @@ func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
prm.SetArgs(args)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
// GetInnerRingList return current IR list.
-func (c *Client) GetInnerRingList() (keys.PublicKeys, error) {
+func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(innerRingListMethod)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err)
}
return irKeysFromStackItem(prms, innerRingListMethod)
@@ -58,7 +59,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys
irs, err := client.ArrayFromStackItem(stack[0])
if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
+ return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err)
}
irKeys := make(keys.PublicKeys, len(irs))
@@ -78,7 +79,7 @@ const irNodeFixedPrmNumber = 1
func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
prms, err := client.ArrayFromStackItem(prm)
if err != nil {
- return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err)
+ return nil, fmt.Errorf("get stack item array (IRNode): %w", err)
} else if ln := len(prms); ln != irNodeFixedPrmNumber {
return nil, fmt.Errorf(
"unexpected stack item count (IRNode): expected %d, has %d",
@@ -89,7 +90,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
byteKey, err := client.BytesFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err)
+ return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err)
}
return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256())
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index 61bbf5f17..97782fc25 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
@@ -11,14 +12,14 @@ import (
// GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and
// decodes netmap.NetMap from the response.
-func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(epochSnapshotMethod)
invokePrm.SetArgs(epoch)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
epochSnapshotMethod, err)
}
@@ -34,13 +35,13 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
// GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo
// from the response.
-func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
+func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapCandidatesMethod)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err)
}
if len(res) > 0 {
@@ -51,13 +52,13 @@ func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
}
// NetMap calls "netmap" method and decode netmap.NetMap from the response.
-func (c *Client) NetMap() (*netmap.NetMap, error) {
+func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapMethod)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
netMapMethod, err)
}
@@ -136,11 +137,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error {
default:
return fmt.Errorf("unsupported state %v", node.State)
case netmapcontract.NodeStateOnline:
- dst.SetOnline()
+ dst.SetStatus(netmap.Online)
case netmapcontract.NodeStateOffline:
- dst.SetOffline()
+ dst.SetStatus(netmap.Offline)
case netmapcontract.NodeStateMaintenance:
- dst.SetMaintenance()
+ dst.SetStatus(netmap.Maintenance)
}
return nil
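
The SetOnline/SetOffline/SetMaintenance helpers give way to a single status enum on netmap.NodeInfo, which also turns the reverse mapping into a plain switch over Status(). Sketch of the new call sites:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

func markOnline(n *netmap.NodeInfo) {
	n.SetStatus(netmap.Online)
}

func describe(n netmap.NodeInfo) {
	// Status() replaces the IsOnline/IsOffline/IsMaintenance trio.
	switch n.Status() {
	case netmap.Online:
		fmt.Println("node is online")
	case netmap.Maintenance:
		fmt.Println("node is under maintenance")
	default:
		fmt.Println("node is offline")
	}
}
```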
diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go
index a8a306197..e686e271e 100644
--- a/pkg/morph/client/netmap/netmap_test.go
+++ b/pkg/morph/client/netmap/netmap_test.go
@@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
switch i % 3 {
default:
- expected[i].SetOffline()
+ expected[i].SetStatus(netmap.Offline)
case int(netmapcontract.NodeStateOnline):
- expected[i].SetOnline()
+ expected[i].SetStatus(netmap.Online)
case int(netmapcontract.NodeStateMaintenance):
- expected[i].SetMaintenance()
+ expected[i].SetStatus(netmap.Maintenance)
}
expected[i].SetPublicKey(pub)
@@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
var state int64
- switch {
- case expected[i].IsOnline():
+ switch expected[i].Status() {
+ case netmap.Online:
state = int64(netmapcontract.NodeStateOnline)
- case expected[i].IsOffline():
+ case netmap.Offline:
state = int64(netmapcontract.NodeStateOffline)
- case expected[i].IsMaintenance():
+ case netmap.Maintenance:
state = int64(netmapcontract.NodeStateMaintenance)
}
diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go
index ded386c86..341b20935 100644
--- a/pkg/morph/client/netmap/new_epoch.go
+++ b/pkg/morph/client/netmap/new_epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,14 +9,14 @@ import (
// NewEpoch updates FrostFS epoch number through
// Netmap contract call.
-func (c *Client) NewEpoch(epoch uint64) error {
+func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
+ return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
}
return nil
}
@@ -24,16 +25,16 @@ func (c *Client) NewEpoch(epoch uint64) error {
// control notary transaction internally to ensure all
// nodes produce the same transaction with high probability.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) NewEpochControl(epoch uint64, vub uint32) (uint32, error) {
+func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
prm.SetControlTX(true)
prm.SetVUB(vub)
- res, err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
+ return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
}
return res.VUB, nil
}
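
NewEpochControl returns the effective valid-until-block of the control transaction, so a caller can bound its wait for the epoch tick. Sketch (same assumed imports and `cli` as above):

```go
func forceNewEpoch(ctx context.Context, cli *netmaprpc.Client, epoch uint64) (uint32, error) {
	// Passing vub == 0 lets the client compute the valid-until-block itself.
	vub, err := cli.NewEpochControl(ctx, epoch, 0)
	if err != nil {
		return 0, fmt.Errorf("force new epoch: %w", err)
	}
	return vub, nil
}
```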
diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go
index 764bbc899..e83acde39 100644
--- a/pkg/morph/client/netmap/peer.go
+++ b/pkg/morph/client/netmap/peer.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"errors"
"fmt"
@@ -24,7 +25,7 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
// AddPeer registers peer in FrostFS network through
// Netmap contract call.
-func (c *Client) AddPeer(p AddPeerPrm) error {
+func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
method := addPeerMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -39,15 +40,15 @@ func (c *Client) AddPeer(p AddPeerPrm) error {
prm.SetArgs(p.nodeInfo.Marshal())
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", method, err)
+ if _, err := c.client.Invoke(ctx, prm); err != nil {
+ return fmt.Errorf("invoke method (%s): %w", method, err)
}
return nil
}
// ForceRemovePeer marks the given peer as offline via a notary control transaction.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
+func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
if !c.client.WithNotary() {
return 0, errFailedToRemovePeerWithoutNotary
}
@@ -57,9 +58,9 @@ func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32,
prm.SetControlTX(true)
prm.SetVUB(vub)
- vub, err := c.UpdatePeerState(prm)
+ res, err := c.UpdatePeerState(ctx, prm)
if err != nil {
return 0, fmt.Errorf("updating peer state: %v", err)
}
- return vub, nil
+ return res.VUB, nil
}
diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go
index ba2c26af7..9dbec1a90 100644
--- a/pkg/morph/client/netmap/snapshot.go
+++ b/pkg/morph/client/netmap/snapshot.go
@@ -1,19 +1,22 @@
package netmap
import (
+ "context"
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
// GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response.
-func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(snapshotMethod)
prm.SetArgs(diff)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err)
}
return DecodeNetMap(res)
diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go
index 7c3a4e8cd..f9f639c19 100644
--- a/pkg/morph/client/netmap/update_state.go
+++ b/pkg/morph/client/netmap/update_state.go
@@ -1,7 +1,7 @@
package netmap
import (
- "fmt"
+ "context"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -36,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() {
}
// UpdatePeerState changes peer status through Netmap contract call.
-func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
+func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) {
method := updateStateMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -55,9 +55,5 @@ func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
prm.SetArgs(int64(p.state), p.key)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(prm)
- if err != nil {
- return 0, fmt.Errorf("could not invoke smart contract: %w", err)
- }
- return res.VUB, nil
+ return c.client.Invoke(ctx, prm)
}
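
UpdatePeerState now surfaces the whole client.InvokeRes rather than a bare VUB, which is what lets ForceRemovePeer above read res.VUB. A sketch of another caller (same assumed imports; the prm is expected to already carry the target key):

```go
func moveToMaintenance(ctx context.Context, cli *netmaprpc.Client, prm netmaprpc.UpdatePeerPrm) (uint32, error) {
	prm.SetMaintenance()

	res, err := cli.UpdatePeerState(ctx, prm)
	if err != nil {
		return 0, fmt.Errorf("update peer state: %w", err)
	}
	return res.VUB, nil
}
```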
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index 218f7ad8e..bc00eb889 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -8,14 +8,12 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const (
@@ -37,12 +35,8 @@ const (
NNSPolicyContractName = "policy.frostfs"
)
-var (
- // ErrNNSRecordNotFound means that there is no such record in NNS contract.
- ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
-
- errEmptyResultStack = errors.New("returned result stack is empty")
-)
+// ErrNNSRecordNotFound means that there is no such record in NNS contract.
+var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
@@ -61,97 +55,36 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {
return util.Uint160{}, ErrConnectionLost
}
- nnsHash, err := c.NNSHash()
- if err != nil {
- return util.Uint160{}, err
- }
-
- sh, err = nnsResolve(c.client, nnsHash, name)
+ sh, err = nnsResolve(c.nnsReader, name)
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
return sh, nil
}
-// NNSHash returns NNS contract hash.
-func (c *Client) NNSHash() (util.Uint160, error) {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- if c.inactive {
- return util.Uint160{}, ErrConnectionLost
- }
-
- success := false
- startedAt := time.Now()
-
- defer func() {
- c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt))
- }()
-
- nnsHash := c.cache.nns()
-
- if nnsHash == nil {
- cs, err := c.client.GetContractStateByID(nnsContractID)
- if err != nil {
- return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
- }
-
- c.cache.setNNSHash(cs.Hash)
- nnsHash = &cs.Hash
- }
- success = true
- return *nnsHash, nil
-}
-
-func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
- found, err := exists(c, nnsHash, domain)
+func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
+ available, err := r.IsAvailable(domain)
if err != nil {
- return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err)
+ return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
}
- if !found {
+ if available {
return nil, ErrNNSRecordNotFound
}
- result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
- {
- Type: smartcontract.StringType,
- Value: domain,
- },
- {
- Type: smartcontract.IntegerType,
- Value: big.NewInt(int64(nns.TXT)),
- },
- }, nil)
- if err != nil {
- return nil, err
- }
- if result.State != vmstate.Halt.String() {
- return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
- }
- if len(result.Stack) == 0 {
- return nil, errEmptyResultStack
- }
- return result.Stack[0], nil
+ return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
}
-func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
- res, err := nnsResolveItem(c, nnsHash, domain)
+func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
+ arr, err := nnsResolveItem(r, domain)
if err != nil {
return util.Uint160{}, err
}
- // Parse the result of resolving NNS record.
- // It works with multiple formats (corresponding to multiple NNS versions).
- // If array of hashes is provided, it returns only the first one.
- if arr, ok := res.Value().([]stackitem.Item); ok {
- if len(arr) == 0 {
- return util.Uint160{}, errors.New("NNS record is missing")
- }
- res = arr[0]
+ if len(arr) == 0 {
+ return util.Uint160{}, errors.New("NNS record is missing")
}
- bs, err := res.TryBytes()
+ bs, err := arr[0].TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
@@ -171,33 +104,6 @@ func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (uti
return util.Uint160{}, errors.New("no valid hashes are found")
}
-func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
- result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
- {
- Type: smartcontract.StringType,
- Value: domain,
- },
- }, nil)
- if err != nil {
- return false, err
- }
-
- if len(result.Stack) == 0 {
- return false, errEmptyResultStack
- }
-
- res := result.Stack[0]
-
- available, err := res.TryBool()
- if err != nil {
- return false, fmt.Errorf("malformed response: %w", err)
- }
-
- // not available means that it is taken
- // and, therefore, exists
- return !available, nil
-}
-
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
@@ -241,18 +147,12 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
return gKey, nil
}
- nnsHash, err := c.NNSHash()
+ arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
if err != nil {
return nil, err
}
- item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
- if err != nil {
- return nil, err
- }
-
- arr, ok := item.Value().([]stackitem.Item)
- if !ok || len(arr) == 0 {
+ if len(arr) == 0 {
return nil, errors.New("NNS record is missing")
}
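
With the hand-rolled isAvailable/resolve invocations replaced by the frostfs-contract ContractReader, an external lookup stays a one-liner through NNSContractAddress. Sketch, assuming a connected morph *client.Client (the import alias morph is ours):

```go
package main

import (
	"fmt"

	morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

func printPolicyHash(cli *morph.Client) error {
	h, err := cli.NNSContractAddress(morph.NNSPolicyContractName)
	if err != nil {
		return fmt.Errorf("resolve policy contract: %w", err)
	}
	fmt.Println("policy contract:", h.StringLE())
	return nil
}
```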
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 616b3b5c3..448702613 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -1,6 +1,7 @@
package client
import (
+ "context"
"crypto/elliptic"
"encoding/binary"
"errors"
@@ -37,8 +38,7 @@ type (
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
- notary util.Uint160
- proxy util.Uint160
+ proxy util.Uint160
}
notaryCfg struct {
@@ -57,16 +57,11 @@ const (
defaultNotaryValidTime = 50
defaultNotaryRoundTime = 100
- notaryBalanceOfMethod = "balanceOf"
- notaryExpirationOfMethod = "expirationOf"
- setDesignateMethod = "designateAsRole"
+ setDesignateMethod = "designateAsRole"
- notaryBalanceErrMsg = "can't fetch notary balance"
notaryNotEnabledPanicMsg = "notary support was not enabled on this client"
)
-var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack")
-
func defaultNotaryConfig(c *Client) *notaryCfg {
 return &notaryCfg{
txValidTime: defaultNotaryValidTime,
@@ -106,7 +101,6 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
alphabetSource: cfg.alphabetSource,
- notary: notary.Hash,
}
c.notary = notaryCfg
@@ -140,7 +134,7 @@ func (c *Client) ProbeNotary() (res bool) {
// use this function.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uint256, err error) {
+func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -154,16 +148,18 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err)
+ return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err)
}
- currentTill, err := c.depositExpirationOf()
+ r := notary.NewReader(c.rpcActor)
+ currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash())
if err != nil {
- return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
+ return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err)
}
- till := max(int64(bc+delta), currentTill)
- return c.depositNotary(amount, till)
+ till := max(int64(bc+delta), int64(currentTill))
+ res, _, err := c.depositNotary(ctx, amount, till)
+ return res, err
}
// DepositEndlessNotary calls notary deposit method. Unlike `DepositNotary`,
@@ -171,12 +167,12 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin
// This allows to avoid ValidAfterDeposit failures.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, err error) {
+func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return util.Uint256{}, ErrConnectionLost
+ return util.Uint256{}, 0, ErrConnectionLost
}
if c.notary == nil {
@@ -184,37 +180,37 @@ func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, e
}
// till value refers to a block height and it is uint32 value in neo-go
- return c.depositNotary(amount, math.MaxUint32)
+ return c.depositNotary(ctx, amount, math.MaxUint32)
}
-func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint256, err error) {
+func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
- c.notary.notary,
+ notary.Hash,
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {
if !errors.Is(err, neorpc.ErrAlreadyExists) {
- return util.Uint256{}, fmt.Errorf("can't make notary deposit: %w", err)
+ return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err)
}
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade,
+ c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Error(err))
- return util.Uint256{}, nil
+ return util.Uint256{}, 0, nil
}
- c.logger.Info(logs.ClientNotaryDepositInvoke,
+ c.logger.Info(ctx, logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
- return txHash, nil
+ return txHash, vub, nil
}
// GetNotaryDeposit returns deposit of client's account in notary contract.
@@ -235,18 +231,10 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) {
sh := c.acc.PrivateKey().PublicKey().GetScriptHash()
- items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh)
+ r := notary.NewReader(c.rpcActor)
+ bigIntDeposit, err := r.BalanceOf(sh)
if err != nil {
- return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)
- }
-
- if len(items) != 1 {
- return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems))
- }
-
- bigIntDeposit, err := items[0].TryInteger()
- if err != nil {
- return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err))
+ return 0, fmt.Errorf("get notary deposit: %w", err)
}
return bigIntDeposit.Int64(), nil
@@ -273,7 +261,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) {
// committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
+func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -287,10 +275,11 @@ func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -321,7 +310,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) {
// Requires committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
+func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -335,10 +324,11 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -354,19 +344,19 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
// Returns valid until block value.
//
// `nonce` and `vub` are used only if notary is enabled.
-func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
+func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return 0, ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(contract, fee, method, args...)
+ return c.Invoke(ctx, contract, fee, method, args...)
}
- return c.notaryInvoke(false, true, contract, nonce, vub, method, args...)
+ return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...)
}
// NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's
@@ -374,19 +364,19 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui
// not expected to be signed by the current node.
//
// Considered to be used by non-IR nodes.
-func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) {
+func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return 0, ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(contract, fee, method, args...)
+ return c.Invoke(ctx, contract, fee, method, args...)
}
- return c.notaryInvoke(false, false, contract, rand.Uint32(), vubP, method, args...)
+ return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...)
}
// NotarySignAndInvokeTX signs and sends notary request that was received from
@@ -403,7 +393,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return fmt.Errorf("could not fetch current alphabet keys: %w", err)
+ return fmt.Errorf("fetch current alphabet keys: %w", err)
}
cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList)
@@ -428,7 +418,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return err
}
- c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked,
+ c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked,
zap.String("tx_hash", mainH.StringLE()),
zap.Uint32("valid_until_block", untilActual),
zap.String("fallback_hash", fbH.StringLE()))
@@ -436,13 +426,13 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return nil
}
-func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error {
+func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error {
designate := c.GetDesignateHash()
- _, err := c.notaryInvoke(true, true, designate, nonce, &vub, method, args...)
+ _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...)
return err
}
-func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
+func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
start := time.Now()
success := false
defer func() {
@@ -451,27 +441,27 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
until, err := c.getUntilValue(vub)
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee)
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
nAct, err := notary.NewActor(c.client, cosigners, c.acc)
if err != nil {
- return 0, err
+ return InvokeRes{}, err
}
mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != vmstate.Halt.String() {
- return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
+ return &notHaltStateError{state: r.State, exception: r.FaultException}
}
t.ValidUntilBlock = until
@@ -481,17 +471,17 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
}, args...))
if err != nil && !alreadyOnChainError(err) {
- return 0, err
+ return InvokeRes{}, err
}
- c.logger.Debug(logs.ClientNotaryRequestInvoked,
+ c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked,
zap.String("method", method),
zap.Uint32("valid_until_block", untilActual),
zap.String("tx_hash", mainH.StringLE()),
zap.String("fallback_hash", fbH.StringLE()))
success = true
- return until, nil
+ return InvokeRes{Hash: mainH, VUB: until}, nil
}
func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) {
@@ -525,24 +515,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet
if ok {
pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err)
+ return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err)
}
acc = notary.FakeSimpleAccount(pub)
} else {
m, pubsBytes, ok := vm.ParseMultiSigContract(script)
if !ok {
- return nil, errors.New("failed to parse verification script of signer #2: unknown witness type")
+ return nil, errors.New("parse verification script of signer #2: unknown witness type")
}
pubs := make(keys.PublicKeys, len(pubsBytes))
for i := range pubs {
pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err)
+ return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err)
}
}
acc, err = notary.FakeMultisigAccount(m, pubs)
if err != nil {
- return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err)
+ return nil, fmt.Errorf("create fake account for signer #2: %w", err)
}
}
}
@@ -618,8 +608,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey())
err := multisigAccount.ConvertMultisig(m, ir)
if err != nil {
- // wrap error as FrostFS-specific since the call is not related to any client
- return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err))
+ return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err)
}
} else {
// alphabet multisig redeem script is
@@ -627,8 +616,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
// inner ring multiaddress witness
multisigAccount, err = notary.FakeMultisigAccount(m, ir)
if err != nil {
- // wrap error as FrostFS-specific since the call is not related to any client
- return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err))
+ return nil, fmt.Errorf("make inner ring multisig wallet: %w", err)
}
}
@@ -638,7 +626,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
func (c *Client) notaryTxValidationLimit() (uint32, error) {
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return 0, fmt.Errorf("can't get current blockchain height: %w", err)
+ return 0, fmt.Errorf("get current blockchain height: %w", err)
}
minTime := bc + c.notary.txValidTime
@@ -647,24 +635,6 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) {
return rounded, nil
}
-func (c *Client) depositExpirationOf() (int64, error) {
- expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash())
- if err != nil {
- return 0, fmt.Errorf("can't invoke method: %w", err)
- }
-
- if len(expirationRes) != 1 {
- return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes))
- }
-
- currentTillBig, err := expirationRes[0].TryInteger()
- if err != nil {
- return 0, fmt.Errorf("can't parse deposit till value: %w", err)
- }
-
- return currentTillBig.Int64(), nil
-}
-
// sigCount returns the number of required signature.
// For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT).
// If committee is true, returns M as N/2+1.
@@ -738,12 +708,12 @@ func alreadyOnChainError(err error) bool {
func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) {
notaryBalance, err := c.GetNotaryDeposit()
if err != nil {
- return 0, fmt.Errorf("could not get notary balance: %w", err)
+ return 0, fmt.Errorf("get notary balance: %w", err)
}
gasBalance, err := c.GasBalance()
if err != nil {
- return 0, fmt.Errorf("could not get GAS balance: %w", err)
+ return 0, fmt.Errorf("get GAS balance: %w", err)
}
if gasBalance == 0 {
@@ -792,12 +762,12 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool)
if hash != nil {
height, err = c.getTransactionHeight(*hash)
if err != nil {
- return 0, 0, fmt.Errorf("could not get transaction height: %w", err)
+ return 0, 0, fmt.Errorf("get transaction height: %w", err)
}
} else {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- return 0, 0, fmt.Errorf("could not get chain height: %w", err)
+ return 0, 0, fmt.Errorf("get chain height: %w", err)
}
}
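
The deposit helpers now take a context (used for the structured logging) and DepositEndlessNotary additionally returns the transfer's VUB. Minimal sketch, assuming a notary-enabled morph *client.Client:

```go
package main

import (
	"context"
	"fmt"

	morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
)

func ensureNotaryDeposit(ctx context.Context, cli *morph.Client) error {
	// Deposit 1 GAS; the expiration moves at least 100 blocks past the
	// current height (DepositNotary keeps the later of old and new values).
	txHash, err := cli.DepositNotary(ctx, fixedn.Fixed8FromInt64(1), 100)
	if err != nil {
		return fmt.Errorf("deposit notary: %w", err)
	}
	fmt.Println("deposit tx:", txHash.StringLE())
	return nil
}
```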
diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go
index dfcf62b83..c4eb120d2 100644
--- a/pkg/morph/client/static.go
+++ b/pkg/morph/client/static.go
@@ -1,8 +1,10 @@
package client
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -128,7 +130,8 @@ func (i *InvokePrmOptional) SetVUB(v uint32) {
}
type InvokeRes struct {
- VUB uint32
+ Hash util.Uint256
+ VUB uint32
}
// Invoke calls Invoke method of Client with static internal script hash and fee.
@@ -140,9 +143,7 @@ type InvokeRes struct {
//
// If fee for the operation executed using specified method is customized, then StaticClient uses it.
// Otherwise, default fee is used.
-func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
- var res InvokeRes
- var err error
+func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) {
var vubP *uint32
if s.tryNotary {
if s.alpha {
@@ -159,7 +160,7 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash)
}
if err != nil {
- return InvokeRes{}, fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err)
+ return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err)
}
vubP = &vub
@@ -169,25 +170,23 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
vubP = &prm.vub
}
- res.VUB, err = s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
- return res, err
+ return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
}
if prm.vub > 0 {
vubP = &prm.vub
}
- res.VUB, err = s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
- return res, err
+ return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
}
- res.VUB, err = s.client.Invoke(
+ return s.client.Invoke(
+ ctx,
s.scScriptHash,
s.fee,
prm.method,
prm.args...,
)
- return res, err
}
// TestInvokePrm groups parameters of the TestInvoke operation.
@@ -207,7 +206,9 @@ func (ti *TestInvokePrm) SetArgs(args ...any) {
}
// TestInvoke calls TestInvoke method of Client with static internal script hash.
-func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) {
+func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
+ defer span.End()
return s.client.TestInvoke(
s.scScriptHash,
prm.method,
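
InvokeRes now carries the main transaction hash next to the VUB, and StaticClient.TestInvoke opens a tracing span named after the contract method. Hypothetical invocation through a StaticClient (the method name is illustrative):

```go
package main

import (
	"context"
	"fmt"

	morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

func invokeAndReport(ctx context.Context, sc morph.StaticClient) error {
	var prm morph.InvokePrm
	prm.SetMethod("someMethod") // hypothetical contract method

	res, err := sc.Invoke(ctx, prm)
	if err != nil {
		return err
	}

	fmt.Printf("tx %s, valid until block %d\n", res.Hash.StringLE(), res.VUB)
	return nil
}
```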
diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go
index cd55d6bd2..f7b6705a8 100644
--- a/pkg/morph/client/util.go
+++ b/pkg/morph/client/util.go
@@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
case stackitem.IntegerT:
n, err := param.TryInteger()
if err != nil {
- return nil, fmt.Errorf("can't parse integer bytes: %w", err)
+ return nil, fmt.Errorf("parse integer bytes: %w", err)
}
return n.Bytes(), nil
@@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) {
func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
return func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != HaltState {
- return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
+ return &notHaltStateError{state: r.State, exception: r.FaultException}
}
t.SystemFee += add
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
new file mode 100644
index 000000000..87fcf84b8
--- /dev/null
+++ b/pkg/morph/client/waiter.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
+)
+
+type waiterClient struct {
+ c *Client
+}
+
+func (w *waiterClient) Context() context.Context {
+ return context.Background()
+}
+
+func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
+ return w.c.GetApplicationLog(hash, trig)
+}
+
+func (w *waiterClient) GetBlockCount() (uint32, error) {
+ return w.c.BlockCount()
+}
+
+func (w *waiterClient) GetVersion() (*result.Version, error) {
+ return w.c.GetVersion()
+}
+
+// WaitTxHalt waits until transaction with the specified hash persists on the blockchain.
+// It also checks that the execution result finished in the HALT state.
+func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
+ w, err := waiter.NewPollingBased(&waiterClient{c: c})
+ if err != nil {
+ return fmt.Errorf("create tx waiter: %w", err)
+ }
+
+ res, err := w.WaitAny(ctx, vub, h)
+ if err != nil {
+ return fmt.Errorf("wait until tx persists: %w", err)
+ }
+
+ if res.VMState.HasFlag(vmstate.Halt) {
+ return nil
+ }
+ return &notHaltStateError{state: res.VMState.String(), exception: res.FaultException}
+}
diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go
index 062a2a886..99f80584a 100644
--- a/pkg/morph/event/balance/lock.go
+++ b/pkg/morph/event/balance/lock.go
@@ -3,7 +3,7 @@ package balance
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -47,61 +47,17 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash }
// ParseLock from notification into lock structure.
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Lock
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var le balance.LockEvent
+ if err := le.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
}
- if ln := len(params); ln != 5 {
- return nil, event.WrongNumberOfParameters(5, ln)
- }
-
- // parse id
- ev.id, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get lock id: %w", err)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get lock user value: %w", err)
- }
-
- ev.user, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err)
- }
-
- // parse lock account
- lock, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get lock account value: %w", err)
- }
-
- ev.lock, err = util.Uint160DecodeBytesBE(lock)
- if err != nil {
- return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err)
- }
-
- // parse amount
- ev.amount, err = client.IntFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get lock amount: %w", err)
- }
-
- // parse until deadline
- ev.until, err = client.IntFromStackItem(params[4])
- if err != nil {
- return nil, fmt.Errorf("could not get lock deadline: %w", err)
- }
-
- ev.txHash = e.Container
-
- return ev, nil
+ return Lock{
+ id: le.TxID,
+ user: le.From,
+ lock: le.To,
+ amount: le.Amount.Int64(),
+ until: le.Until.Int64(),
+ txHash: e.Container,
+ }, nil
}
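
ParseLock keeps its event.Event signature, so listener registration is unchanged; only the decoding moved into the typed balance.LockEvent binding. Consuming the parsed event (the type assertion relies on ParseLock only ever returning a balance.Lock):

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
)

func handleLock(e *state.ContainedNotificationEvent) error {
	ev, err := balance.ParseLock(e)
	if err != nil {
		return fmt.Errorf("parse lock: %w", err)
	}

	lock := ev.(balance.Lock) // ParseLock returns a balance.Lock value
	fmt.Println("lock tx:", lock.TxHash().StringLE())
	return nil
}
```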
diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go
index 9199bcd55..87b91aede 100644
--- a/pkg/morph/event/balance/lock_test.go
+++ b/pkg/morph/event/balance/lock_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -28,7 +27,7 @@ func TestParseLock(t *testing.T) {
}
_, err := ParseLock(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go
index a206307f8..d28f6d521 100644
--- a/pkg/morph/event/container/delete.go
+++ b/pkg/morph/event/container/delete.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -58,28 +58,14 @@ func (DeleteSuccess) MorphEvent() {}
// ParseDeleteSuccess decodes notification event thrown by Container contract into
// DeleteSuccess and returns it as event.Event.
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
+ var dse container.DeleteSuccessEvent
+ if err := dse.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err)
}
- const expectedItemNumDeleteSuccess = 1
-
- if ln := len(items); ln != expectedItemNumDeleteSuccess {
- return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln)
- }
-
- binID, err := client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("parse container ID item: %w", err)
- }
-
- var res DeleteSuccess
-
- err = res.ID.Decode(binID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- return res, nil
+ var cnr cid.ID
+ cnr.SetSHA256(dse.ContainerID)
+ return DeleteSuccess{
+ ID: cnr,
+ }, nil
}
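
The same three steps now implement both container parsers (delete here, put below): decode the typed event, convert the SHA-256 into a cid.ID, wrap it. Standalone sketch of the delete case:

```go
package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
)

func containerIDFromDelete(e *state.ContainedNotificationEvent) (cid.ID, error) {
	var dse container.DeleteSuccessEvent
	if err := dse.FromStackItem(e.Item); err != nil {
		return cid.ID{}, err
	}

	var id cid.ID
	id.SetSHA256(dse.ContainerID)
	return id, nil
}
```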
diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go
index 627c5fcf5..62e7d7277 100644
--- a/pkg/morph/event/container/delete_test.go
+++ b/pkg/morph/event/container/delete_test.go
@@ -4,7 +4,6 @@ import (
"crypto/sha256"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -18,7 +17,7 @@ func TestParseDeleteSuccess(t *testing.T) {
}
_, err := ParseDeleteSuccess(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong container parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go
deleted file mode 100644
index 4168d8842..000000000
--- a/pkg/morph/event/container/eacl.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package container
-
-import (
- "github.com/nspcc-dev/neo-go/pkg/network/payload"
-)
-
-// SetEACL represents structure of notification about
-// modified eACL table coming from FrostFS Container contract.
-type SetEACL struct {
- TableValue []byte
- SignatureValue []byte
- PublicKeyValue []byte
- TokenValue []byte
-
- // For notary notifications only.
- // Contains raw transactions of notary request.
- NotaryRequestValue *payload.P2PNotaryRequest
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (SetEACL) MorphEvent() {}
-
-// Table returns returns eACL table in a binary FrostFS API format.
-func (x SetEACL) Table() []byte {
- return x.TableValue
-}
-
-// Signature returns signature of the binary table.
-func (x SetEACL) Signature() []byte {
- return x.SignatureValue
-}
-
-// PublicKey returns public keys of container
-// owner in a binary format.
-func (x SetEACL) PublicKey() []byte {
- return x.PublicKeyValue
-}
-
-// SessionToken returns binary token of the session
-// within which the eACL was set.
-func (x SetEACL) SessionToken() []byte {
- return x.TokenValue
-}
-
-// NotaryRequest returns raw notary request if notification
-// was received via notary service. Otherwise, returns nil.
-func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest {
- return x.NotaryRequestValue
-}
-
-const expectedItemNumEACL = 4
diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go
deleted file mode 100644
index a4fe7c966..000000000
--- a/pkg/morph/event/container/eacl_notary.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package container
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
-)
-
-func (x *SetEACL) setTable(v []byte) {
- if v != nil {
- x.TableValue = v
- }
-}
-
-func (x *SetEACL) setSignature(v []byte) {
- if v != nil {
- x.SignatureValue = v
- }
-}
-
-func (x *SetEACL) setPublicKey(v []byte) {
- if v != nil {
- x.PublicKeyValue = v
- }
-}
-
-func (x *SetEACL) setToken(v []byte) {
- if v != nil {
- x.TokenValue = v
- }
-}
-
-var setEACLFieldSetters = []func(*SetEACL, []byte){
- // order on stack is reversed
- (*SetEACL).setToken,
- (*SetEACL).setPublicKey,
- (*SetEACL).setSignature,
- (*SetEACL).setTable,
-}
-
-const (
- // SetEACLNotaryEvent is method name for container EACL operations
- // in `Container` contract. Is used as identificator for notary
- // EACL changing requests.
- SetEACLNotaryEvent = "setEACL"
-)
-
-// ParseSetEACLNotary from NotaryEvent into container event structure.
-func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) {
- var (
- ev SetEACL
- currentOp opcode.Opcode
- )
-
- fieldNum := 0
-
- for _, op := range ne.Params() {
- currentOp = op.Code()
-
- switch {
- case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4:
- if fieldNum == expectedItemNumEACL {
- return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent)
- }
-
- setEACLFieldSetters[fieldNum](&ev, op.Param())
- fieldNum++
- default:
- return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code())
- }
- }
-
- ev.NotaryRequestValue = ne.Raw()
-
- return ev, nil
-}
diff --git a/pkg/morph/event/container/estimates.go b/pkg/morph/event/container/estimates.go
deleted file mode 100644
index 9fd21e2b5..000000000
--- a/pkg/morph/event/container/estimates.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package container
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-// StartEstimation structure of container.StartEstimation notification from
-// morph chain.
-type StartEstimation struct {
- epoch uint64
-}
-
-// StopEstimation structure of container.StopEstimation notification from
-// morph chain.
-type StopEstimation struct {
- epoch uint64
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (StartEstimation) MorphEvent() {}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (StopEstimation) MorphEvent() {}
-
-// Epoch returns epoch value for which to start container size estimation.
-func (s StartEstimation) Epoch() uint64 { return s.epoch }
-
-// Epoch returns epoch value for which to stop container size estimation.
-func (s StopEstimation) Epoch() uint64 { return s.epoch }
-
-// ParseStartEstimation from notification into container event structure.
-func ParseStartEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- epoch, err := parseEstimation(params)
- if err != nil {
- return nil, err
- }
-
- return StartEstimation{epoch: epoch}, nil
-}
-
-// ParseStopEstimation from notification into container event structure.
-func ParseStopEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- epoch, err := parseEstimation(params)
- if err != nil {
- return nil, err
- }
-
- return StopEstimation{epoch: epoch}, nil
-}
-
-func parseEstimation(params []stackitem.Item) (uint64, error) {
- if ln := len(params); ln != 1 {
- return 0, event.WrongNumberOfParameters(1, ln)
- }
-
- // parse container
- epoch, err := client.IntFromStackItem(params[0])
- if err != nil {
- return 0, fmt.Errorf("could not get estimation epoch: %w", err)
- }
-
- return uint64(epoch), nil
-}
diff --git a/pkg/morph/event/container/estimates_test.go b/pkg/morph/event/container/estimates_test.go
deleted file mode 100644
index be46e62c4..000000000
--- a/pkg/morph/event/container/estimates_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package container
-
-import (
- "math/big"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestStartEstimation(t *testing.T) {
- var epochNum uint64 = 100
- epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseStartEstimation(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong estimation parameter", func(t *testing.T) {
- _, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
- epochItem,
- }))
-
- require.NoError(t, err)
-
- require.Equal(t, StartEstimation{
- epochNum,
- }, ev)
- })
-}
-
-func TestStopEstimation(t *testing.T) {
- var epochNum uint64 = 100
- epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseStopEstimation(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong estimation parameter", func(t *testing.T) {
- _, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
- epochItem,
- }))
-
- require.NoError(t, err)
-
- require.Equal(t, StopEstimation{
- epochNum,
- }, ev)
- })
-}
diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go
index 335034bf3..b09394ba4 100644
--- a/pkg/morph/event/container/put.go
+++ b/pkg/morph/event/container/put.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -78,33 +78,14 @@ func (PutSuccess) MorphEvent() {}
// ParsePutSuccess decodes notification event thrown by Container contract into
// PutSuccess and returns it as event.Event.
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
+ var pse container.PutSuccessEvent
+ if err := pse.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err)
}
- const expectedItemNumPutSuccess = 2
-
- if ln := len(items); ln != expectedItemNumPutSuccess {
- return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln)
- }
-
- binID, err := client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("parse container ID item: %w", err)
- }
-
- _, err = client.BytesFromStackItem(items[1])
- if err != nil {
- return nil, fmt.Errorf("parse public key item: %w", err)
- }
-
- var res PutSuccess
-
- err = res.ID.Decode(binID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- return res, nil
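+ // FromStackItem has already validated the item count and types; all that
+ // is left is to wrap the raw SHA-256 checksum into a cid.ID.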
+ var cnr cid.ID
+ cnr.SetSHA256(pse.ContainerID)
+ return PutSuccess{
+ ID: cnr,
+ }, nil
}
diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go
index f5779ced6..6b2ee7b0a 100644
--- a/pkg/morph/event/container/put_notary.go
+++ b/pkg/morph/event/container/put_notary.go
@@ -46,7 +46,7 @@ const (
// put container requests.
PutNotaryEvent = "put"
- // PutNotaryEvent is an ID of notary "put named container" notification.
+ // PutNamedNotaryEvent is an ID of notary "put named container" notification.
PutNamedNotaryEvent = "putNamed"
)
diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go
index 3622f9943..dd5c7ea93 100644
--- a/pkg/morph/event/container/put_test.go
+++ b/pkg/morph/event/container/put_test.go
@@ -4,8 +4,8 @@ import (
"crypto/sha256"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) {
}
_, err := ParsePutSuccess(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong container ID parameter", func(t *testing.T) {
@@ -35,18 +35,30 @@ func TestParsePutSuccess(t *testing.T) {
id.Encode(binID)
t.Run("wrong public key parameter", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewMap(),
- }))
+ t.Run("wrong type", func(t *testing.T) {
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewMap(),
+ }))
- require.Error(t, err)
+ require.Error(t, err)
+ })
+ t.Run("garbage data", func(t *testing.T) {
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewByteArray([]byte("key")),
+ }))
+ require.Error(t, err)
+ })
})
t.Run("correct behavior", func(t *testing.T) {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(binID),
- stackitem.NewByteArray([]byte("key")),
+ stackitem.NewByteArray(pk.PublicKey().Bytes()),
}))
require.NoError(t, err)
diff --git a/pkg/morph/event/container/eacl_test.go b/pkg/morph/event/container/util_test.go
similarity index 100%
rename from pkg/morph/event/container/eacl_test.go
rename to pkg/morph/event/container/util_test.go
diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go
index eae2a23f5..cf56464b8 100644
--- a/pkg/morph/event/frostfs/cheque.go
+++ b/pkg/morph/event/frostfs/cheque.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,53 +34,20 @@ func (c Cheque) LockAccount() util.Uint160 { return c.LockValue }
// ParseCheque from notification into cheque structure.
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Cheque
- err error
- )
+ var ce frostfs.ChequeEvent
+ if err := ce.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err)
+ }
- params, err := event.ParseStackArray(e)
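+ // LockAccount arrives as raw bytes and still has to be decoded into a script hash.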
+ lock, err := util.Uint160DecodeBytesBE(ce.LockAccount)
if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err)
}
- if ln := len(params); ln != 4 {
- return nil, event.WrongNumberOfParameters(4, ln)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque id: %w", err)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque user: %w", err)
- }
-
- ev.UserValue, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err)
- }
-
- // parse amount
- ev.AmountValue, err = client.IntFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque amount: %w", err)
- }
-
- // parse lock account
- lock, err := client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque lock account: %w", err)
- }
-
- ev.LockValue, err = util.Uint160DecodeBytesBE(lock)
- if err != nil {
- return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err)
- }
-
- return ev, nil
+ return Cheque{
+ IDValue: ce.Id,
+ AmountValue: ce.Amount.Int64(),
+ UserValue: ce.User,
+ LockValue: lock,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go
index ab177757f..d92b7922b 100644
--- a/pkg/morph/event/frostfs/cheque_test.go
+++ b/pkg/morph/event/frostfs/cheque_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -27,7 +26,7 @@ func TestParseCheque(t *testing.T) {
}
_, err := ParseCheque(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go
index 4c87634c2..805e80f3c 100644
--- a/pkg/morph/event/frostfs/config.go
+++ b/pkg/morph/event/frostfs/config.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -36,39 +36,15 @@ func (u Config) Key() []byte { return u.KeyValue }
func (u Config) Value() []byte { return u.ValueValue }
func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Config
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var sce frostfs.SetConfigEvent
+ if err := sce.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err)
}
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get config update id: %w", err)
- }
-
- // parse key
- ev.KeyValue, err = client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get config key: %w", err)
- }
-
- // parse value
- ev.ValueValue, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get config value: %w", err)
- }
-
- ev.TxHashValue = e.Container
-
- return ev, nil
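+ // FromStackItem validates all three fields, so the event is assembled
+ // directly from the binding.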
+ return Config{
+ KeyValue: sce.Key,
+ ValueValue: sce.Value,
+ IDValue: sce.Id,
+ TxHashValue: e.Container,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go
index dcd4201e4..8acc8c15c 100644
--- a/pkg/morph/event/frostfs/config_test.go
+++ b/pkg/morph/event/frostfs/config_test.go
@@ -3,7 +3,6 @@ package frostfs
import (
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -21,7 +20,7 @@ func TestParseConfig(t *testing.T) {
}
_, err := ParseConfig(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong first parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go
index d8a3b82f0..fcb01577e 100644
--- a/pkg/morph/event/frostfs/deposit.go
+++ b/pkg/morph/event/frostfs/deposit.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,50 +34,15 @@ func (d Deposit) Amount() int64 { return d.AmountValue }
// ParseDeposit notification into deposit structure.
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ev Deposit
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var de frostfs.DepositEvent
+ if err := de.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err)
}
- if ln := len(params); ln != 4 {
- return nil, event.WrongNumberOfParameters(4, ln)
- }
-
- // parse from
- from, err := client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit sender: %w", err)
- }
-
- ev.FromValue, err = util.Uint160DecodeBytesBE(from)
- if err != nil {
- return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err)
- }
-
- // parse amount
- ev.AmountValue, err = client.IntFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit amount: %w", err)
- }
-
- // parse to
- to, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit receiver: %w", err)
- }
-
- ev.ToValue, err = util.Uint160DecodeBytesBE(to)
- if err != nil {
- return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit id: %w", err)
- }
-
- return ev, nil
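+ // The binding types the deposit ID as a transaction hash; the raw bytes
+ // are kept for the []byte-typed IDValue field.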
+ return Deposit{
+ IDValue: de.TxHash[:],
+ AmountValue: de.Amount.Int64(),
+ FromValue: de.From,
+ ToValue: de.Receiver,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go
index f279a7f9c..38d3e61f6 100644
--- a/pkg/morph/event/frostfs/deposit_test.go
+++ b/pkg/morph/event/frostfs/deposit_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -12,7 +11,7 @@ import (
func TestParseDeposit(t *testing.T) {
var (
- id = []byte("Hello World")
+ id = util.Uint256{0, 1, 2, 3}
from = util.Uint160{0x1, 0x2, 0x3}
to = util.Uint160{0x3, 0x2, 0x1}
@@ -26,7 +25,7 @@ func TestParseDeposit(t *testing.T) {
}
_, err := ParseDeposit(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong from parameter", func(t *testing.T) {
@@ -72,12 +71,12 @@ func TestParseDeposit(t *testing.T) {
stackitem.NewByteArray(from.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
stackitem.NewByteArray(to.BytesBE()),
- stackitem.NewByteArray(id),
+ stackitem.NewByteArray(id[:]),
}))
require.NoError(t, err)
require.Equal(t, Deposit{
- IDValue: id,
+ IDValue: id[:],
AmountValue: amount,
FromValue: from,
ToValue: to,
diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go
deleted file mode 100644
index 62203540f..000000000
--- a/pkg/morph/event/frostfs/ir_update.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package frostfs
-
-import (
- "crypto/elliptic"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type UpdateInnerRing struct {
- keys []*keys.PublicKey
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (UpdateInnerRing) MorphEvent() {}
-
-func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys }
-
-func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) {
- var (
- ev UpdateInnerRing
- err error
- )
-
- if ln := len(params); ln != 1 {
- return nil, event.WrongNumberOfParameters(1, ln)
- }
-
- // parse keys
- irKeys, err := client.ArrayFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get updated inner ring keys: %w", err)
- }
-
- ev.keys = make([]*keys.PublicKey, 0, len(irKeys))
- for i := range irKeys {
- rawKey, err := client.BytesFromStackItem(irKeys[i])
- if err != nil {
- return nil, fmt.Errorf("could not get updated inner ring public key: %w", err)
- }
-
- key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
- if err != nil {
- return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err)
- }
-
- ev.keys = append(ev.keys, key)
- }
-
- return ev, nil
-}
diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go
deleted file mode 100644
index fae87e5f9..000000000
--- a/pkg/morph/event/frostfs/ir_update_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package frostfs
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func genKey(t *testing.T) *keys.PrivateKey {
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
- return priv
-}
-
-func TestParseUpdateInnerRing(t *testing.T) {
- publicKeys := []*keys.PublicKey{
- genKey(t).PublicKey(),
- genKey(t).PublicKey(),
- genKey(t).PublicKey(),
- }
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseUpdateInnerRing(prms)
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong first parameter", func(t *testing.T) {
- _, err := ParseUpdateInnerRing([]stackitem.Item{
- stackitem.NewMap(),
- })
-
- require.Error(t, err)
- })
-
- t.Run("correct", func(t *testing.T) {
- ev, err := ParseUpdateInnerRing([]stackitem.Item{
- stackitem.NewArray([]stackitem.Item{
- stackitem.NewByteArray(publicKeys[0].Bytes()),
- stackitem.NewByteArray(publicKeys[1].Bytes()),
- stackitem.NewByteArray(publicKeys[2].Bytes()),
- }),
- })
- require.NoError(t, err)
-
- require.Equal(t, UpdateInnerRing{
- keys: publicKeys,
- }, ev)
- })
-}
diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go
index f48067f86..2568b6512 100644
--- a/pkg/morph/event/frostfs/withdraw.go
+++ b/pkg/morph/event/frostfs/withdraw.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -30,39 +30,14 @@ func (w Withdraw) Amount() int64 { return w.AmountValue }
// ParseWithdraw notification into withdraw structure.
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ev Withdraw
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var we frostfs.WithdrawEvent
+ if err := we.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err)
}
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw user: %w", err)
- }
-
- ev.UserValue, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err)
- }
-
- // parse amount
- ev.AmountValue, err = client.IntFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw amount: %w", err)
- }
-
- // parse id
- ev.IDValue, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw id: %w", err)
- }
-
- return ev, nil
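+ // As with deposits, the transaction hash doubles as the withdraw ID.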
+ return Withdraw{
+ IDValue: we.TxHash[:],
+ AmountValue: we.Amount.Int64(),
+ UserValue: we.User,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go
index 33435d19a..e382305e6 100644
--- a/pkg/morph/event/frostfs/withdraw_test.go
+++ b/pkg/morph/event/frostfs/withdraw_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -12,7 +11,7 @@ import (
func TestParseWithdraw(t *testing.T) {
var (
- id = []byte("Hello World")
+ id = util.Uint256{1, 2, 3}
user = util.Uint160{0x1, 0x2, 0x3}
amount int64 = 10
@@ -25,7 +24,7 @@ func TestParseWithdraw(t *testing.T) {
}
_, err := ParseWithdraw(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong user parameter", func(t *testing.T) {
@@ -59,12 +58,12 @@ func TestParseWithdraw(t *testing.T) {
ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(user.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
- stackitem.NewByteArray(id),
+ stackitem.NewByteArray(id[:]),
}))
require.NoError(t, err)
require.Equal(t, Withdraw{
- IDValue: id,
+ IDValue: id[:],
AmountValue: amount,
UserValue: user,
}, ev)
diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go
index 182b4667e..55a514ff1 100644
--- a/pkg/morph/event/handlers.go
+++ b/pkg/morph/event/handlers.go
@@ -1,32 +1,26 @@
package event
import (
+ "context"
+
"github.com/nspcc-dev/neo-go/pkg/core/block"
+ "github.com/nspcc-dev/neo-go/pkg/util"
)
// Handler is an Event processing function.
-type Handler func(Event)
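+// The context is used for cancellation and is passed on to context-aware logging.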
+type Handler func(context.Context, Event)
// BlockHandler is a chain block processing function.
-type BlockHandler func(*block.Block)
+type BlockHandler func(context.Context, *block.Block)
// NotificationHandlerInfo is a structure that groups
// the parameters of the handler of particular
// contract event.
type NotificationHandlerInfo struct {
- scriptHashWithType
-
- h Handler
-}
-
-// SetHandler is an event handler setter.
-func (s *NotificationHandlerInfo) SetHandler(v Handler) {
- s.h = v
-}
-
-// Handler returns an event handler.
-func (s NotificationHandlerInfo) Handler() Handler {
- return s.h
+ Contract util.Uint160
+ Type Type
+ Parser NotificationParser
+ Handlers []Handler
}
// NotaryHandlerInfo is a structure that groups
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index dd3c7d216..e5cdfeef7 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -33,13 +33,6 @@ type Listener interface {
// it could not be started.
ListenWithError(context.Context, chan<- error)
- // SetNotificationParser must set the parser of particular contract event.
- //
- // Parser of each event must be set once. All parsers must be set before Listen call.
- //
- // Must ignore nil parsers and all calls after listener has been started.
- SetNotificationParser(NotificationParserInfo)
-
// RegisterNotificationHandler must register the event handler for particular notification event of contract.
//
// The specified handler must be called after each capture and parsing of the event.
@@ -100,8 +93,6 @@ type listener struct {
startOnce, stopOnce sync.Once
- started bool
-
notificationParsers map[scriptHashWithType]NotificationParser
notificationHandlers map[scriptHashWithType][]Handler
@@ -120,7 +111,7 @@ type listener struct {
pool *ants.Pool
}
-const newListenerFailMsg = "could not instantiate Listener"
+const newListenerFailMsg = "instantiate Listener"
var (
errNilLogger = errors.New("nil logger")
@@ -143,11 +134,8 @@ func (l *listener) Listen(ctx context.Context) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
- if err := l.listen(ctx, nil); err != nil {
- l.log.Error(logs.EventCouldNotStartListenToEvents,
- zap.String("error", err.Error()),
- )
- }
+
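+ // listen no longer returns an error: subscription failures surface
+ // through the internal error channel handled in listenLoop.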
+ l.listen(ctx, nil)
})
}
@@ -161,26 +149,17 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
- if err := l.listen(ctx, intError); err != nil {
- l.log.Error(logs.EventCouldNotStartListenToEvents,
- zap.String("error", err.Error()),
- )
- l.sendError(ctx, intError, err)
- }
+
+ l.listen(ctx, intError)
})
}
-func (l *listener) listen(ctx context.Context, intError chan<- error) error {
- // mark listener as started
- l.started = true
-
+func (l *listener) listen(ctx context.Context, intError chan<- error) {
subErrCh := make(chan error)
go l.subscribe(subErrCh)
l.listenLoop(ctx, intError, subErrCh)
-
- return nil
}
func (l *listener) subscribe(errCh chan error) {
@@ -192,7 +171,7 @@ func (l *listener) subscribe(errCh chan error) {
// fill the list with the contracts with set event parsers.
l.mtx.RLock()
for hashType := range l.notificationParsers {
- scHash := hashType.ScriptHash()
+ scHash := hashType.Hash
// prevent repetitions
for _, hash := range hashes {
@@ -201,26 +180,26 @@ func (l *listener) subscribe(errCh chan error) {
}
}
- hashes = append(hashes, hashType.ScriptHash())
+ hashes = append(hashes, hashType.Hash)
}
l.mtx.RUnlock()
err := l.subscriber.SubscribeForNotification(hashes...)
if err != nil {
- errCh <- fmt.Errorf("could not subscribe for notifications: %w", err)
+ errCh <- fmt.Errorf("subscribe for notifications: %w", err)
return
}
if len(l.blockHandlers) > 0 {
if err = l.subscriber.BlockNotifications(); err != nil {
- errCh <- fmt.Errorf("could not subscribe for blocks: %w", err)
+ errCh <- fmt.Errorf("subscribe for blocks: %w", err)
return
}
}
if l.listenNotary {
if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
- errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err)
+ errCh <- fmt.Errorf("subscribe for notary requests: %w", err)
return
}
}
@@ -234,7 +213,7 @@ func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error
// in the same routine when shutting down node.
select {
case <-ctx.Done():
- l.log.Info(logs.EventStopEventListenerByContext,
+ l.log.Info(ctx, logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
return false
@@ -251,81 +230,81 @@ loop:
select {
case err := <-subErrCh:
if !l.sendError(ctx, intErr, err) {
- l.log.Error(logs.EventStopEventListenerByError, zap.Error(err))
+ l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
}
break loop
case <-ctx.Done():
- l.log.Info(logs.EventStopEventListenerByContext,
+ l.log.Info(ctx, logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
break loop
case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
- l.log.Warn(logs.EventStopEventListenerByNotificationChannel)
+ l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
break loop
} else if notifyEvent == nil {
- l.log.Warn(logs.EventNilNotificationEventWasCaught)
+ l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
continue loop
}
- l.handleNotifyEvent(notifyEvent)
+ l.handleNotifyEvent(ctx, notifyEvent)
case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
- l.log.Warn(logs.EventStopEventListenerByNotaryChannel)
+ l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
break loop
} else if notaryEvent == nil {
- l.log.Warn(logs.EventNilNotaryEventWasCaught)
+ l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
continue loop
}
- l.handleNotaryEvent(notaryEvent)
+ l.handleNotaryEvent(ctx, notaryEvent)
case b, ok := <-chs.BlockCh:
if !ok {
- l.log.Warn(logs.EventStopEventListenerByBlockChannel)
+ l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
break loop
} else if b == nil {
- l.log.Warn(logs.EventNilBlockWasCaught)
+ l.log.Warn(ctx, logs.EventNilBlockWasCaught)
continue loop
}
- l.handleBlockEvent(b)
+ l.handleBlockEvent(ctx, b)
}
}
}
-func (l *listener) handleBlockEvent(b *block.Block) {
+func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) {
if err := l.pool.Submit(func() {
for i := range l.blockHandlers {
- l.blockHandlers[i](b)
+ l.blockHandlers[i](ctx, b)
}
}); err != nil {
- l.log.Warn(logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
+func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotary(notaryEvent)
+ l.parseAndHandleNotary(ctx, notaryEvent)
}); err != nil {
- l.log.Warn(logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotification(notifyEvent)
+ l.parseAndHandleNotification(ctx, notifyEvent)
}); err != nil {
- l.log.Warn(logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
log := l.log.With(
zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
)
@@ -338,16 +317,14 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
)
// get the event parser
- keyEvent := scriptHashWithType{}
- keyEvent.SetScriptHash(notifyEvent.ScriptHash)
- keyEvent.SetType(typEvent)
+ keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent}
l.mtx.RLock()
parser, ok := l.notificationParsers[keyEvent]
l.mtx.RUnlock()
if !ok {
- log.Debug(logs.EventEventParserNotSet)
+ log.Debug(ctx, logs.EventEventParserNotSet)
return
}
@@ -355,8 +332,8 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
// parse the notification event
event, err := parser(notifyEvent)
if err != nil {
- log.Warn(logs.EventCouldNotParseNotificationEvent,
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
+ zap.Error(err),
)
return
@@ -368,7 +345,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
l.mtx.RUnlock()
if len(handlers) == 0 {
- log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -376,11 +353,11 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
}
for _, handler := range handlers {
- handler(event)
+ handler(ctx, event)
}
}
-func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
+func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) {
// prepare the notary event
notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest)
if err != nil {
@@ -388,14 +365,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
switch {
case errors.Is(err, ErrTXAlreadyHandled):
case errors.As(err, &expErr):
- l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent,
- zap.String("error", err.Error()),
+ l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
+ zap.Error(err),
zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
)
default:
- l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent,
- zap.String("error", err.Error()),
+ l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
+ zap.Error(err),
)
}
@@ -418,7 +395,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Debug(logs.EventNotaryParserNotSet)
+ log.Debug(ctx, logs.EventNotaryParserNotSet)
return
}
@@ -426,8 +403,8 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
// parse the notary event
event, err := parser(notaryEvent)
if err != nil {
- log.Warn(logs.EventCouldNotParseNotaryEvent,
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
+ zap.Error(err),
)
return
@@ -439,47 +416,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
return
}
- handler(event)
-}
-
-// SetNotificationParser sets the parser of particular contract event.
-//
-// Ignores nil and already set parsers.
-// Ignores the parser if listener is started.
-func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
- log := l.log.With(
- zap.String("contract", pi.ScriptHash().StringLE()),
- zap.Stringer("event_type", pi.getType()),
- )
-
- parser := pi.parser()
- if parser == nil {
- log.Info(logs.EventIgnoreNilEventParser)
- return
- }
-
- l.mtx.Lock()
- defer l.mtx.Unlock()
-
- // check if the listener was started
- if l.started {
- log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
- return
- }
-
- // add event parser
- if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok {
- l.notificationParsers[pi.scriptHashWithType] = pi.parser()
- }
-
- log.Debug(logs.EventRegisteredNewEventParser)
+ handler(ctx, event)
}
// RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -488,35 +432,23 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
-// Ignores handlers of event without parser.
+// Registers the parser along with the handlers.
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
log := l.log.With(
- zap.String("contract", hi.ScriptHash().StringLE()),
- zap.Stringer("event_type", hi.GetType()),
+ zap.String("contract", hi.Contract.StringLE()),
+ zap.Stringer("event_type", hi.Type),
)
- handler := hi.Handler()
- if handler == nil {
- log.Warn(logs.EventIgnoreNilEventHandler)
- return
- }
-
- // check if parser was set
+ // parser and handlers are registered together under one (contract, event type) key
- l.mtx.RLock()
- _, ok := l.notificationParsers[hi.scriptHashWithType]
- l.mtx.RUnlock()
-
- if !ok {
- log.Warn(logs.EventIgnoreHandlerOfEventWoParser)
- return
- }
-
- // add event handler
l.mtx.Lock()
- l.notificationHandlers[hi.scriptHashWithType] = append(
- l.notificationHandlers[hi.scriptHashWithType],
- hi.Handler(),
- )
- l.mtx.Unlock()
+ defer l.mtx.Unlock()
- log.Debug(logs.EventRegisteredNewEventHandler)
+ k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type}
+
+ l.notificationParsers[k] = hi.Parser
+ l.notificationHandlers[k] = append(
+ l.notificationHandlers[k],
+ hi.Handlers...,
+ )
+
+ log.Debug(context.Background(), logs.EventRegisteredNewEventHandler)
}
// EnableNotarySupport enables notary request listening. Passed hash is
@@ -555,27 +487,15 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
zap.Stringer("notary_type", pi.RequestType()),
)
- parser := pi.parser()
- if parser == nil {
- log.Info(logs.EventIgnoreNilNotaryEventParser)
- return
- }
-
l.mtx.Lock()
defer l.mtx.Unlock()
- // check if the listener was started
- if l.started {
- log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
- return
- }
-
// add event parser
if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok {
l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
}
- log.Info(logs.EventRegisteredNewEventParser)
+ log.Info(context.Background(), logs.EventRegisteredNewEventParser)
}
// RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -593,19 +513,13 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
zap.Stringer("notary type", hi.RequestType()),
)
- handler := hi.Handler()
- if handler == nil {
- log.Warn(logs.EventIgnoreNilNotaryEventHandler)
- return
- }
-
// check if parser was set
l.mtx.RLock()
_, ok := l.notaryParsers[hi.notaryRequestTypes]
l.mtx.RUnlock()
if !ok {
- log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser)
+ log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser)
return
}
@@ -614,7 +528,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
l.mtx.Unlock()
- log.Info(logs.EventRegisteredNewEventHandler)
+ log.Info(context.Background(), logs.EventRegisteredNewEventHandler)
}
// Stop closes subscription channel with remote neo node.
@@ -627,11 +541,6 @@ func (l *listener) Stop() {
}
func (l *listener) RegisterBlockHandler(handler BlockHandler) {
- if handler == nil {
- l.log.Warn(logs.EventIgnoreNilBlockHandler)
- return
- }
-
l.blockHandlers = append(l.blockHandlers, handler)
}
@@ -648,7 +557,7 @@ func NewListener(p ListenerParams) (Listener, error) {
// The default capacity is 0, which means "infinite".
pool, err := ants.NewPool(p.WorkerPoolCapacity)
if err != nil {
- return nil, fmt.Errorf("could not init worker pool: %w", err)
+ return nil, fmt.Errorf("init worker pool: %w", err)
}
return &listener{
diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go
index 5f7cf9f43..87f37305f 100644
--- a/pkg/morph/event/listener_test.go
+++ b/pkg/morph/event/listener_test.go
@@ -34,34 +34,24 @@ func TestEventHandling(t *testing.T) {
blockHandled := make(chan bool)
handledBlocks := make([]*block.Block, 0)
- l.RegisterBlockHandler(func(b *block.Block) {
+ l.RegisterBlockHandler(func(_ context.Context, b *block.Block) {
handledBlocks = append(handledBlocks, b)
blockHandled <- true
})
- key := scriptHashWithType{
- scriptHashValue: scriptHashValue{
- hash: util.Uint160{100},
- },
- typeValue: typeValue{
- typ: TypeFromString("notification type"),
- },
- }
-
- l.SetNotificationParser(NotificationParserInfo{
- scriptHashWithType: key,
- p: func(cne *state.ContainedNotificationEvent) (Event, error) {
- return testNotificationEvent{source: cne}, nil
- },
- })
-
notificationHandled := make(chan bool)
handledNotifications := make([]Event, 0)
l.RegisterNotificationHandler(NotificationHandlerInfo{
- scriptHashWithType: key,
- h: func(e Event) {
- handledNotifications = append(handledNotifications, e)
- notificationHandled <- true
+ Contract: util.Uint160{100},
+ Type: TypeFromString("notification type"),
+ Parser: func(cne *state.ContainedNotificationEvent) (Event, error) {
+ return testNotificationEvent{source: cne}, nil
+ },
+ Handlers: []Handler{
+ func(_ context.Context, e Event) {
+ handledNotifications = append(handledNotifications, e)
+ notificationHandled <- true
+ },
},
})
@@ -137,7 +127,7 @@ func TestErrorPassing(t *testing.T) {
WorkerPoolCapacity: 10,
})
require.NoError(t, err, "failed to create listener")
- l.RegisterBlockHandler(func(b *block.Block) {})
+ l.RegisterBlockHandler(func(context.Context, *block.Block) {})
errCh := make(chan error)
diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go
index e454e2a6a..39c8f6237 100644
--- a/pkg/morph/event/netmap/epoch.go
+++ b/pkg/morph/event/netmap/epoch.go
@@ -1,9 +1,7 @@
package netmap
import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -37,22 +35,13 @@ func (s NewEpoch) TxHash() util.Uint256 {
//
// Result is type of NewEpoch.
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != 1 {
- return nil, event.WrongNumberOfParameters(1, ln)
- }
-
- prmEpochNum, err := client.IntFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get integer epoch number: %w", err)
+ var nee netmap.NewEpochEvent
+ if err := nee.FromStackItem(e.Item); err != nil {
+ return nil, err
}
return NewEpoch{
- Num: uint64(prmEpochNum),
+ Num: nee.Epoch.Uint64(),
Hash: e.Container,
}, nil
}
diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go
index bc267ecb6..6ff692327 100644
--- a/pkg/morph/event/netmap/epoch_test.go
+++ b/pkg/morph/event/netmap/epoch_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +16,7 @@ func TestParseNewEpoch(t *testing.T) {
}
_, err := ParseNewEpoch(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong first parameter type", func(t *testing.T) {
diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go
index 0260810b8..993182ab4 100644
--- a/pkg/morph/event/netmap/update_peer_notary.go
+++ b/pkg/morph/event/netmap/update_peer_notary.go
@@ -10,7 +10,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)
-var errNilPubKey = errors.New("could not parse public key: public key is nil")
+var errNilPubKey = errors.New("public key is nil")
func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
if v == nil {
@@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256())
if err != nil {
- return fmt.Errorf("could not parse public key: %w", err)
+ return fmt.Errorf("parse public key: %w", err)
}
return
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index 37091f768..b11973646 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
for {
opCode, param, err = ctx.Next()
if err != nil {
- return nil, fmt.Errorf("could not get next opcode in script: %w", err)
+ return nil, fmt.Errorf("get next opcode in script: %w", err)
}
if opCode == opcode.RET {
@@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
// retrieve contract's script hash
contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param)
if err != nil {
- return nil, fmt.Errorf("could not decode contract hash: %w", err)
+ return nil, fmt.Errorf("decode contract hash: %w", err)
}
// retrieve contract's method
@@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
if len(args) != 0 {
err = p.validateParameterOpcodes(args)
if err != nil {
- return nil, fmt.Errorf("could not validate arguments: %w", err)
+ return nil, fmt.Errorf("validate arguments: %w", err)
}
// without args packing opcodes
@@ -199,14 +199,14 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
// neo-go API)
//
// this check prevents notary flow recursion
- if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 ||
- bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version
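+ // De Morgan rewrite of the old check: a non-empty invocation script that
+ // differs from the dummy placeholder means the request was already handled.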
+ if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 &&
+ !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version
return ErrTXAlreadyHandled
}
currentAlphabet, err := p.alphaKeys()
if err != nil {
- return fmt.Errorf("could not fetch Alphabet public keys: %w", err)
+ return fmt.Errorf("fetch Alphabet public keys: %w", err)
}
err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
@@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error {
argsLen, err := IntFromOpcode(ops[l-2])
if err != nil {
- return fmt.Errorf("could not parse argument len: %w", err)
+ return fmt.Errorf("parse argument len: %w", err)
}
err = validateNestedArgs(argsLen, ops[:l-2])
@@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error {
argsLen, err := IntFromOpcode(ops[i-1])
if err != nil {
- return fmt.Errorf("could not parse argument len: %w", err)
+ return fmt.Errorf("parse argument len: %w", err)
}
expArgLen += argsLen + 1
@@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error {
currBlock, err := p.blockCounter.BlockCount()
if err != nil {
- return fmt.Errorf("could not fetch current chain height: %w", err)
+ return fmt.Errorf("fetch current chain height: %w", err)
}
if currBlock >= nvb.Height {
@@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("could not get Alphabet verification script: %w", err)
+ return fmt.Errorf("get Alphabet verification script: %w", err)
}
if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) {
@@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("could not get Alphabet verification script: %w", err)
+ return fmt.Errorf("get Alphabet verification script: %w", err)
}
// the second one must be witness of the current
@@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
// the last one must be a placeholder for notary contract witness
last := len(w) - 1
- if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981
- bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
+ if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981
+ !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
len(w[last].VerificationScript) != 0 {
return errIncorrectNotaryPlaceholder
}
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index 4c269bcbd..60ddb4601 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -25,7 +25,7 @@ var (
alphaKeys keys.PublicKeys
wrongAlphaKeys keys.PublicKeys
- dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in
+ dummyAlphabetInvocationScript []byte // nil when generated by Notary Actor, as the requester cannot fill it in
dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually
wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...)
diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go
index 90eff0bd2..5adeb4b30 100644
--- a/pkg/morph/event/parsers.go
+++ b/pkg/morph/event/parsers.go
@@ -11,15 +11,6 @@ import (
// from the StackItem list.
type NotificationParser func(*state.ContainedNotificationEvent) (Event, error)
-// NotificationParserInfo is a structure that groups
-// the parameters of particular contract
-// notification event parser.
-type NotificationParserInfo struct {
- scriptHashWithType
-
- p NotificationParser
-}
-
// NotaryPreparator constructs NotaryEvent
// from the NotaryRequest event.
type NotaryPreparator interface {
@@ -47,24 +38,6 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) {
n.p = p
}
-// SetParser is an event parser setter.
-func (s *NotificationParserInfo) SetParser(v NotificationParser) {
- s.p = v
-}
-
-func (s NotificationParserInfo) parser() NotificationParser {
- return s.p
-}
-
-// SetType is an event type setter.
-func (s *NotificationParserInfo) SetType(v Type) {
- s.typ = v
-}
-
-func (s NotificationParserInfo) getType() Type {
- return s.typ
-}
-
type wrongPrmNumber struct {
exp, act int
}
diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go
index 28c968046..b384e436b 100644
--- a/pkg/morph/event/rolemanagement/designate.go
+++ b/pkg/morph/event/rolemanagement/designate.go
@@ -26,7 +26,7 @@ func (Designate) MorphEvent() {}
func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) {
params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("parse stack items from notify event: %w", err)
}
if len(params) != 2 {
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index f3b6443fb..0088be400 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -1,6 +1,7 @@
package event
import (
+ "context"
"errors"
"fmt"
@@ -19,13 +20,9 @@ type scriptHashValue struct {
hash util.Uint160
}
-type typeValue struct {
- typ Type
-}
-
type scriptHashWithType struct {
- scriptHashValue
- typeValue
+ Hash util.Uint160
+ Type Type
}
type notaryRequestTypes struct {
@@ -72,25 +69,15 @@ func (s scriptHashValue) ScriptHash() util.Uint160 {
return s.hash
}
-// SetType is an event type setter.
-func (s *typeValue) SetType(v Type) {
- s.typ = v
-}
-
-// GetType is an event type getter.
-func (s typeValue) GetType() Type {
- return s.typ
-}
-
// WorkerPoolHandler sets closure over worker pool w with passed handler h.
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler {
- return func(e Event) {
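+ // The caller's context is captured and forwarded to the wrapped handler
+ // when it eventually runs on the worker pool.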
+ return func(ctx context.Context, e Event) {
err := w.Submit(func() {
- h(e)
+ h(ctx, e)
})
if err != nil {
- log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool,
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool,
+ zap.Error(err),
)
}
}
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index ee5466a7d..4ef59ed6a 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -245,16 +245,16 @@ routeloop:
}
func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool {
- s.log.Info(logs.RPConnectionLost)
+ s.log.Info(ctx, logs.RPConnectionLost)
if !s.client.SwitchRPC(ctx) {
- s.log.Error(logs.RPCNodeSwitchFailure)
+ s.log.Error(ctx, logs.RPCNodeSwitchFailure)
return false
}
s.Lock()
chs := newSubChannels()
go func() {
- finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
+ finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
}()
s.current = chs
s.Unlock()
@@ -295,7 +295,7 @@ drainloop:
// restoreSubscriptions restores subscriptions according to
// cached information about them.
-func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent,
+func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent,
blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent,
) bool {
var err error
@@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
if s.subscribedToNewBlocks {
_, err = s.client.ReceiveBlocks(blCh)
if err != nil {
- s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
for contract := range s.subscribedEvents {
_, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
if err != nil {
- s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
for signer := range s.subscribedNotaryEvents {
_, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
if err != nil {
- s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go
index be20d3571..974be1120 100644
--- a/pkg/morph/timer/block.go
+++ b/pkg/morph/timer/block.go
@@ -15,41 +15,19 @@ type BlockTickHandler func()
// It can tick the blocks and perform certain actions
// on block time intervals.
type BlockTimer struct {
- rolledBack bool
-
mtx sync.Mutex
dur BlockMeter
baseDur uint32
- mul, div uint32
-
cur, tgt uint32
last uint32
h BlockTickHandler
- ps []BlockTimer
-
once bool
-
- deltaCfg
-}
-
-// DeltaOption is an option of delta-interval handler.
-type DeltaOption func(*deltaCfg)
-
-type deltaCfg struct {
- pulse bool
-}
-
-// WithPulse returns option to call delta-interval handler multiple times.
-func WithPulse() DeltaOption {
- return func(c *deltaCfg) {
- c.pulse = true
- }
}
// StaticBlockMeter returns BlockMeters that always returns (d, nil).
@@ -65,52 +43,19 @@ func StaticBlockMeter(d uint32) BlockMeter {
func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
- mul: 1,
- div: 1,
h: h,
- deltaCfg: deltaCfg{
- pulse: true,
- },
}
}
// NewOneTickTimer creates a new BlockTimer that ticks only once.
-//
-// Do not use delta handlers with pulse in this timer.
func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
- mul: 1,
- div: 1,
h: h,
once: true,
}
}
-// OnDelta registers handler which is executed on (mul / div * BlockMeter()) block
-// after basic interval reset.
-//
-// If WithPulse option is provided, handler is executed (mul / div * BlockMeter()) block
-// during base interval.
-func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) {
- c := deltaCfg{
- pulse: false,
- }
-
- for i := range opts {
- opts[i](&c)
- }
-
- t.ps = append(t.ps, BlockTimer{
- mul: mul,
- div: div,
- h: h,
- once: t.once,
-
- deltaCfg: c,
- })
-}
-
// Reset resets previous ticks of the BlockTimer.
//
// Returns BlockMeter's error upon occurrence.
@@ -124,29 +69,18 @@ func (t *BlockTimer) Reset() error {
t.resetWithBaseInterval(d)
- for i := range t.ps {
- t.ps[i].resetWithBaseInterval(d)
- }
-
t.mtx.Unlock()
return nil
}
func (t *BlockTimer) resetWithBaseInterval(d uint32) {
- t.rolledBack = false
t.baseDur = d
t.reset()
}
func (t *BlockTimer) reset() {
- mul, div := t.mul, t.div
-
- if !t.pulse && t.rolledBack && mul < div {
- mul, div = 1, 1
- }
-
- delta := mul * t.baseDur / div
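+ // With delta sub-timers removed, the tick interval is just the base
+ // duration, clamped to at least one block below.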
+ delta := t.baseDur
if delta == 0 {
delta = 1
}
@@ -180,12 +114,7 @@ func (t *BlockTimer) tick(h uint32) {
if !t.once {
t.cur = 0
- t.rolledBack = true
t.reset()
}
}
-
- for i := range t.ps {
- t.ps[i].tick(h)
- }
}
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index 7929754c1..a144b3db6 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -1,6 +1,7 @@
package timer_test
import (
+ "errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
@@ -8,7 +9,7 @@ import (
)
func tickN(t *timer.BlockTimer, n uint32) {
- for i := uint32(0); i < n; i++ {
+ for range n {
t.Tick(0)
}
}
@@ -17,7 +18,7 @@ func tickN(t *timer.BlockTimer, n uint32) {
// "resetting" consists of ticking the current height as well and invoking `Reset`.
func TestIRBlockTimer_Reset(t *testing.T) {
var baseCounter [2]int
- blockDur := uint32(3)
+ const blockDur = uint32(3)
bt1 := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
@@ -48,8 +49,40 @@ func TestIRBlockTimer_Reset(t *testing.T) {
require.Equal(t, baseCounter[0], baseCounter[1])
}
+func TestBlockTimer_ResetChangeDuration(t *testing.T) {
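+ // Reset must propagate BlockMeter errors and, once the meter recovers,
+ // apply the updated duration on the next reset.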
+ var dur uint32 = 2
+ var err error
+ var counter int
+
+ bt := timer.NewBlockTimer(
+ func() (uint32, error) { return dur, err },
+ func() { counter++ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 2)
+ require.Equal(t, 1, counter)
+
+ t.Run("return error", func(t *testing.T) {
+ dur = 5
+ err = errors.New("my awesome error")
+ require.ErrorIs(t, bt.Reset(), err)
+
+ tickN(bt, 2)
+ require.Equal(t, 2, counter)
+ })
+ t.Run("change duration", func(t *testing.T) {
+ dur = 5
+ err = nil
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 5)
+ require.Equal(t, 3, counter)
+ })
+}
+
func TestBlockTimer(t *testing.T) {
- blockDur := uint32(10)
+ const blockDur = uint32(10)
baseCallCounter := uint32(0)
bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
@@ -63,85 +96,6 @@ func TestBlockTimer(t *testing.T) {
tickN(bt, intervalNum*blockDur)
require.Equal(t, intervalNum, uint32(baseCallCounter))
-
- // add half-interval handler
- halfCallCounter := uint32(0)
-
- bt.OnDelta(1, 2, func() {
- halfCallCounter++
- })
-
- // add double interval handler
- doubleCallCounter := uint32(0)
-
- bt.OnDelta(2, 1, func() {
- doubleCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- baseCallCounter = 0
- intervalNum = 20
-
- tickN(bt, intervalNum*blockDur)
-
- require.Equal(t, intervalNum, uint32(halfCallCounter))
- require.Equal(t, intervalNum, uint32(baseCallCounter))
- require.Equal(t, intervalNum/2, uint32(doubleCallCounter))
-}
-
-func TestDeltaPulse(t *testing.T) {
- blockDur := uint32(9)
- baseCallCounter := uint32(0)
-
- bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- deltaCallCounter := uint32(0)
-
- div := uint32(3)
-
- bt.OnDelta(1, div, func() {
- deltaCallCounter++
- }, timer.WithPulse())
-
- require.NoError(t, bt.Reset())
-
- intervalNum := uint32(7)
-
- tickN(bt, intervalNum*blockDur)
-
- require.Equal(t, intervalNum, uint32(baseCallCounter))
- require.Equal(t, intervalNum*div, uint32(deltaCallCounter))
-}
-
-func TestDeltaReset(t *testing.T) {
- blockDur := uint32(6)
- baseCallCounter := 0
-
- bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- detlaCallCounter := 0
-
- bt.OnDelta(1, 3, func() {
- detlaCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 6)
-
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 3)
-
- require.Equal(t, 2, detlaCallCounter)
}
func TestNewOneTickTimer(t *testing.T) {
@@ -168,82 +122,51 @@ func TestNewOneTickTimer(t *testing.T) {
tickN(bt, 10)
require.Equal(t, 1, baseCallCounter)
})
-
- t.Run("delta without pulse", func(t *testing.T) {
- blockDur = uint32(10)
- baseCallCounter = 0
-
- bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- detlaCallCounter := 0
-
- bt.OnDelta(1, 10, func() {
- detlaCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 10)
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
-
- tickN(bt, 10) // 10 more ticks must not affect counters
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
- })
}
func TestBlockTimer_TickSameHeight(t *testing.T) {
- var baseCounter, deltaCounter int
+ var baseCounter int
blockDur := uint32(2)
bt := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
func() { baseCounter++ })
- bt.OnDelta(2, 1, func() {
- deltaCounter++
- })
require.NoError(t, bt.Reset())
- check := func(t *testing.T, h uint32, base, delta int) {
+ check := func(t *testing.T, h uint32, base int) {
for range 2 * int(blockDur) {
bt.Tick(h)
require.Equal(t, base, baseCounter)
- require.Equal(t, delta, deltaCounter)
}
}
- check(t, 1, 0, 0)
- check(t, 2, 1, 0)
- check(t, 3, 1, 0)
- check(t, 4, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 1)
+ check(t, 3, 1)
+ check(t, 4, 2)
t.Run("works the same way after `Reset()`", func(t *testing.T) {
t.Run("same block duration", func(t *testing.T) {
require.NoError(t, bt.Reset())
baseCounter = 0
- deltaCounter = 0
- check(t, 1, 0, 0)
- check(t, 2, 1, 0)
- check(t, 3, 1, 0)
- check(t, 4, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 1)
+ check(t, 3, 1)
+ check(t, 4, 2)
})
t.Run("different block duration", func(t *testing.T) {
blockDur = 3
require.NoError(t, bt.Reset())
baseCounter = 0
- deltaCounter = 0
- check(t, 1, 0, 0)
- check(t, 2, 0, 0)
- check(t, 3, 1, 0)
- check(t, 4, 1, 0)
- check(t, 5, 1, 0)
- check(t, 6, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 0)
+ check(t, 3, 1)
+ check(t, 4, 1)
+ check(t, 5, 1)
+ check(t, 6, 2)
})
})
}
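Side note: `tickN` now ranges directly over an integer, a Go 1.22+ feature consistent with the toolchain bump elsewhere in this diff. For reference, both integer range forms:

```go
package main

import "fmt"

func main() {
	n := uint32(3)
	for range n { // runs exactly n times, no loop variable
		fmt.Println("tick")
	}
	for i := range n { // i takes the values 0..n-1, typed as uint32
		fmt.Println("height", i)
	}
}
```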
diff --git a/pkg/network/address.go b/pkg/network/address.go
index 88f4a571d..4643eef15 100644
--- a/pkg/network/address.go
+++ b/pkg/network/address.go
@@ -2,12 +2,12 @@ package network
import (
"errors"
- "fmt"
"net"
"net/url"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@@ -44,11 +44,9 @@ func (a Address) equal(addr Address) bool {
// See also FromString.
func (a Address) URIAddr() string {
_, host, err := manet.DialArgs(a.ma)
- if err != nil {
- // the only correct way to construct Address is AddressFromString
- // which makes this error appear unexpected
- panic(fmt.Errorf("could not get host addr: %w", err))
- }
+ // the only correct way to construct Address is AddressFromString
+ // which makes this error appear unexpected
+ assert.NoError(err, "could not get host addr")
if !a.IsTLSEnabled() {
return host
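The `internal/assert` package is introduced outside this hunk, so the following is only a hypothetical sketch of the shape such a helper typically has, matching the call site above (not the actual implementation):

```go
package assert

import (
	"fmt"
	"strings"
)

// NoError panics if err is non-nil, prefixing the panic message with the
// optional details (hypothetical implementation, not the actual one).
func NoError(err error, details ...string) {
	if err != nil {
		panic(fmt.Sprintf("%s: %v", strings.Join(details, " "), err))
	}
}
```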
diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go
index 371d3c76f..63ae0bfdb 100644
--- a/pkg/network/cache/client.go
+++ b/pkg/network/cache/client.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
@@ -25,6 +26,7 @@ type (
Key *ecdsa.PrivateKey
ResponseCallback func(client.ResponseMetaInfo) error
AllowExternal bool
+ DialerSource *net.DialerSource
}
)
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 9305c143b..54c1e18fb 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -7,11 +7,13 @@ import (
"sync"
"time"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"google.golang.org/grpc"
@@ -60,18 +62,26 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address
prmInit.Key = *x.opts.Key
}
+ grpcOpts := []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInterceptor(),
+ tagging.NewUnaryClientInterceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ tagging.NewStreamClientInterceptor(),
+ ),
+ grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
+ }
+
prmDial := client.PrmDial{
- Endpoint: addr.URIAddr(),
- GRPCDialOptions: []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInteceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- metrics.NewStreamClientInterceptor(),
- tracing.NewStreamClientInterceptor(),
- ),
- },
+ Endpoint: addr.URIAddr(),
+ GRPCDialOptions: grpcOpts,
}
if x.opts.DialTimeout > 0 {
prmDial.DialTimeout = x.opts.DialTimeout
@@ -151,7 +161,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
group.IterateAddresses(func(addr network.Address) bool {
select {
case <-ctx.Done():
- firstErr = context.Canceled
+ firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled)
return true
default:
}
@@ -169,15 +179,16 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
var siErr *objectSDK.SplitInfoError
var eiErr *objectSDK.ECInfoError
+ if err != nil {
+ err = fmt.Errorf("client connection error at %v: %w", addr, err)
+ x.ReportError(err)
+ }
+
success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr)
if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) {
firstErr = err
}
- if err != nil {
- x.ReportError(err)
- }
-
return success
})
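For readers unfamiliar with interceptor chaining: `grpc.WithChainUnaryInterceptor` composes interceptors left to right, so here the QoS IO-tag adjustment runs before metrics, tracing, and tagging. A standalone sketch with a hypothetical logging interceptor, using the standard `google.golang.org/grpc` API:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logging returns a unary client interceptor that logs the method name and
// then delegates to the next interceptor (or the real invoker) in the chain.
func logging(tag string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any,
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
	) error {
		log.Printf("[%s] -> %s", tag, method)
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}

func main() {
	// "outer" wraps "inner": outer logs first, then inner, then the call runs.
	opts := []grpc.DialOption{
		grpc.WithChainUnaryInterceptor(logging("outer"), logging("inner")),
	}
	_ = opts
}
```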
diff --git a/pkg/network/group.go b/pkg/network/group.go
index a6de0653e..0044fb2d4 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -3,6 +3,8 @@ package network
import (
"errors"
"fmt"
+ "iter"
+ "slices"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -67,11 +69,10 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // Must iterate over network addresses and pass each one
- // to the handler until it returns true.
- IterateAddresses(func(string) bool)
+ // Addresses must return an iterator over network addresses.
+ Addresses() iter.Seq[string]
- // Must return number of addresses in group.
+ // NumberOfAddresses must return number of addresses in group.
NumberOfAddresses() int
}
@@ -130,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error {
// iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f
// until 1st parsing failure or f's error.
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) {
- iter.IterateAddresses(func(s string) bool {
+ for s := range iter.Addresses() {
var a Address
err = a.FromString(s)
if err != nil {
- err = fmt.Errorf("could not parse address from string: %w", err)
- return true
+ return fmt.Errorf("could not parse address from string: %w", err)
}
err = f(a)
-
- return err != nil
- })
+ if err != nil {
+ return err
+ }
+ }
return
}
@@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
// at least one common address.
func (x AddressGroup) Intersects(x2 AddressGroup) bool {
for i := range x {
- for j := range x2 {
- if x[i].equal(x2[j]) {
- return true
- }
+ if slices.ContainsFunc(x2, x[i].equal) {
+ return true
}
}
diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go
index 5b335fa52..d08264533 100644
--- a/pkg/network/group_test.go
+++ b/pkg/network/group_test.go
@@ -1,6 +1,8 @@
package network
import (
+ "iter"
+ "slices"
"sort"
"testing"
@@ -58,10 +60,8 @@ func TestAddressGroup_FromIterator(t *testing.T) {
type testIterator []string
-func (t testIterator) IterateAddresses(f func(string) bool) {
- for i := range t {
- f(t[i])
- }
+func (t testIterator) Addresses() iter.Seq[string] {
+ return slices.Values(t)
}
func (t testIterator) NumberOfAddresses() int {
diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go
index 2144a3001..78129bfbe 100644
--- a/pkg/network/transport/accounting/grpc/service.go
+++ b/pkg/network/transport/accounting/grpc/service.go
@@ -3,9 +3,9 @@ package accounting
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
- accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
+ accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc"
)
// Server wraps FrostFS API Accounting service and
diff --git a/pkg/network/transport/apemanager/grpc/service.go b/pkg/network/transport/apemanager/grpc/service.go
index 59783cfc0..850d38a65 100644
--- a/pkg/network/transport/apemanager/grpc/service.go
+++ b/pkg/network/transport/apemanager/grpc/service.go
@@ -3,9 +3,9 @@ package apemanager
import (
"context"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
- apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc"
apemanager_svc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+ apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
)
type Server struct {
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index 9fae22b45..8cbf8d9c3 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -3,9 +3,9 @@ package container
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
containersvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
)
// Server wraps FrostFS API Container service and
@@ -80,3 +80,26 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
+
+type containerStreamerV2 struct {
+ containerGRPC.ContainerService_ListStreamServer
+}
+
+func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error {
+ return s.ContainerService_ListStreamServer.Send(
+ resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse),
+ )
+}
+
+// ListStream converts gRPC ListStreamRequest message and server-side stream and overtakes its data
+// to gRPC stream.
+func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error {
+ listReq := new(container.ListStreamRequest)
+ if err := listReq.FromGRPCMessage(req); err != nil {
+ return err
+ }
+
+ return s.srv.ListStream(listReq, &containerStreamerV2{
+ ContainerService_ListStreamServer: gStream,
+ })
+}
diff --git a/pkg/network/transport/netmap/grpc/service.go b/pkg/network/transport/netmap/grpc/service.go
index 406c77e58..4bc3a42f8 100644
--- a/pkg/network/transport/netmap/grpc/service.go
+++ b/pkg/network/transport/netmap/grpc/service.go
@@ -3,9 +3,9 @@ package grpc
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
- netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
netmapsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
+ netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
)
// Server wraps FrostFS API Netmap service and
diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go
index e1655c183..655b1f9fb 100644
--- a/pkg/network/transport/object/grpc/get.go
+++ b/pkg/network/transport/object/grpc/get.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
type getStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/range.go b/pkg/network/transport/object/grpc/range.go
index 391536e8e..7d7ce0e4c 100644
--- a/pkg/network/transport/object/grpc/range.go
+++ b/pkg/network/transport/object/grpc/range.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
type getRangeStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go
index a151ced09..8432707f7 100644
--- a/pkg/network/transport/object/grpc/search.go
+++ b/pkg/network/transport/object/grpc/search.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
type searchStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index d55e3d87f..15dacd553 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -5,10 +5,10 @@ import (
"errors"
"io"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
// Server wraps FrostFS API Object service and
@@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server {
// Patch opens internal Object patch stream and feeds it by the data read from gRPC stream.
func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
- stream, err := s.srv.Patch()
+ stream, err := s.srv.Patch(gStream.Context())
if err != nil {
return err
}
@@ -68,7 +68,7 @@ func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
- stream, err := s.srv.Put()
+ stream, err := s.srv.Put(gStream.Context())
if err != nil {
return err
}
diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go
index e0dc74942..6fce397f3 100644
--- a/pkg/network/transport/session/grpc/service.go
+++ b/pkg/network/transport/session/grpc/service.go
@@ -3,9 +3,9 @@ package session
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
sessionsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc"
)
// Server wraps FrostFS API Session service and
diff --git a/pkg/network/validation.go b/pkg/network/validation.go
index 92f650119..b5157f28f 100644
--- a/pkg/network/validation.go
+++ b/pkg/network/validation.go
@@ -2,6 +2,7 @@ package network
import (
"errors"
+ "iter"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -34,8 +35,8 @@ var (
// MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo
-func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) {
- (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
+func (x NodeEndpointsIterator) Addresses() iter.Seq[string] {
+ return (netmap.NodeInfo)(x).NetworkEndpoints()
}
func (x NodeEndpointsIterator) NumberOfAddresses() int {
diff --git a/pkg/services/accounting/executor.go b/pkg/services/accounting/executor.go
index b0722cf8a..93e44c52b 100644
--- a/pkg/services/accounting/executor.go
+++ b/pkg/services/accounting/executor.go
@@ -4,8 +4,8 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type ServiceExecutor interface {
diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go
index ac836b71d..6c2df8428 100644
--- a/pkg/services/accounting/morph/executor.go
+++ b/pkg/services/accounting/morph/executor.go
@@ -5,9 +5,9 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
+func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing account")
@@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceReque
return nil, fmt.Errorf("invalid account: %w", err)
}
- amount, err := s.client.BalanceOf(id)
+ amount, err := s.client.BalanceOf(ctx, id)
if err != nil {
return nil, err
}
- balancePrecision, err := s.client.Decimals()
+ balancePrecision, err := s.client.Decimals(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/accounting/server.go b/pkg/services/accounting/server.go
index 72833c46c..a280416fb 100644
--- a/pkg/services/accounting/server.go
+++ b/pkg/services/accounting/server.go
@@ -3,7 +3,7 @@ package accounting
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
// Server is an interface of the FrostFS API Accounting service server.
diff --git a/pkg/services/accounting/sign.go b/pkg/services/accounting/sign.go
index cd6ff0307..d8feb76bd 100644
--- a/pkg/services/accounting/sign.go
+++ b/pkg/services/accounting/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type signService struct {
diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go
index d132ae7db..61fb025b8 100644
--- a/pkg/services/apemanager/audit.go
+++ b/pkg/services/apemanager/audit.go
@@ -4,10 +4,10 @@ import (
"context"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
- ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+ ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
)
var _ Server = (*auditService)(nil)
@@ -33,7 +33,7 @@ func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainReq
return res, err
}
- audit.LogRequest(a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
res.GetBody().GetChainID()),
@@ -49,7 +49,7 @@ func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChain
return res, err
}
- audit.LogRequest(a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
nil),
@@ -65,7 +65,7 @@ func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveCh
return res, err
}
- audit.LogRequest(a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
req.GetBody().GetChainID()),
diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go
index e64f9a8d1..1d485321c 100644
--- a/pkg/services/apemanager/errors/errors.go
+++ b/pkg/services/apemanager/errors/errors.go
@@ -9,3 +9,9 @@ func ErrAPEManagerAccessDenied(reason string) error {
err.WriteReason(reason)
return err
}
+
+func ErrAPEManagerInvalidArgument(msg string) error {
+ err := new(apistatus.InvalidArgument)
+ err.SetMessage(msg)
+ return err
+}
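The new constructor mirrors `ErrAPEManagerAccessDenied` for the InvalidArgument status. A hypothetical caller sketch of how a handler rejects malformed input with an API-level status instead of a plain error:

```go
package apemanager

import (
	apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors"
)

// validateTargetName is a hypothetical example, not part of this diff.
func validateTargetName(name string) error {
	if name == "" {
		return apemanager_errors.ErrAPEManagerInvalidArgument("target name is empty")
	}
	return nil
}
```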
diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go
index 25f43486a..fc08fe569 100644
--- a/pkg/services/apemanager/executor.go
+++ b/pkg/services/apemanager/executor.go
@@ -8,20 +8,21 @@ import (
"errors"
"fmt"
- apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape"
- apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ apeV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/ape"
+ apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/mr-tron/base58/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
@@ -34,6 +35,8 @@ type cfg struct {
type Service struct {
cfg
+ waiter Waiter
+
cnrSrc containercore.Source
contractStorage ape_contract.ProxyAdaptedContractStorage
@@ -41,11 +44,17 @@ type Service struct {
type Option func(*cfg)
-func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service {
+type Waiter interface {
+ WaitTxHalt(context.Context, uint32, util.Uint256) error
+}
+
+func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service {
s := &Service{
cnrSrc: cnrSrc,
contractStorage: contractStorage,
+
+ waiter: waiter,
}
for i := range opts {
@@ -53,7 +62,7 @@ func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedC
}
if s.log == nil {
- s.log = &logger.Logger{Logger: zap.NewNop()}
+ s.log = logger.NewLoggerWrapper(zap.NewNop())
}
return s
@@ -69,12 +78,12 @@ var _ Server = (*Service)(nil)
// validateContainerTargetRequest validates request for the container target.
// It checks if request actor is the owner of the container, otherwise it denies the request.
-func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error {
+func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error {
var cidSDK cidSDK.ID
if err := cidSDK.DecodeString(cid); err != nil {
- return fmt.Errorf("invalid CID format: %w", err)
+ return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err))
}
- isOwner, err := s.isActorContainerOwner(cidSDK, pubKey)
+ isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey)
if err != nil {
return fmt.Errorf("failed to check owner: %w", err)
}
@@ -84,7 +93,7 @@ func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.Public
return nil
}
-func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
+func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -92,7 +101,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest)
chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw())
if err != nil {
- return nil, err
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error())
}
if len(chain.ID) == 0 {
const randomIDLength = 10
@@ -108,15 +117,19 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest)
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, fmt.Errorf("unsupported target type: %s", targetType)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
}
- if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil {
+ txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
return nil, err
}
@@ -129,7 +142,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest)
return resp, nil
}
-func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
+func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -140,15 +153,19 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, fmt.Errorf("unsupported target type: %s", targetType)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
}
- if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil {
+ txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID())
+ if err != nil {
+ return nil, err
+ }
+ if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
return nil, err
}
@@ -160,7 +177,7 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe
return resp, nil
}
-func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
+func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -171,12 +188,12 @@ func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequ
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, fmt.Errorf("unsupported target type: %s", targetType)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
}
chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target)
@@ -210,23 +227,23 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK
}
sig := vh.GetBodySignature()
if sig == nil {
- return nil, errEmptyBodySignature
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error())
}
key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("invalid signature key: %w", err)
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err))
}
return key, nil
}
-func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
+func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- cnr, err := s.cnrSrc.Get(cid)
+ cnr, err := s.cnrSrc.Get(ctx, cid)
if err != nil {
return false, fmt.Errorf("get container error: %w", err)
}
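`Service` now depends on a `Waiter`, so `AddChain`/`RemoveChain` return only after the side-chain transaction halts. For unit tests a trivial double is enough; a hypothetical sketch against the interface introduced above:

```go
package apemanager_test

import (
	"context"

	"github.com/nspcc-dev/neo-go/pkg/util"
)

// noopWaiter is a hypothetical test double for the Waiter interface:
// it treats every transaction as already halted.
type noopWaiter struct{}

func (noopWaiter) WaitTxHalt(context.Context, uint32, util.Uint256) error {
	return nil
}
```

With this in place, `New(cnrSrc, contractStorage, noopWaiter{})` wires the service for tests without touching the chain.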
diff --git a/pkg/services/apemanager/server.go b/pkg/services/apemanager/server.go
index 90b2d92ae..e624177ac 100644
--- a/pkg/services/apemanager/server.go
+++ b/pkg/services/apemanager/server.go
@@ -3,7 +3,7 @@ package apemanager
import (
"context"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
)
type Server interface {
diff --git a/pkg/services/apemanager/sign.go b/pkg/services/apemanager/sign.go
index eda2a7342..a172624ff 100644
--- a/pkg/services/apemanager/sign.go
+++ b/pkg/services/apemanager/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
)
type signService struct {
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
index 278f6da31..eb6263320 100644
--- a/pkg/services/common/ape/checker.go
+++ b/pkg/services/common/ape/checker.go
@@ -1,6 +1,7 @@
package ape
import (
+ "context"
"crypto/ecdsa"
"errors"
"fmt"
@@ -11,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
@@ -20,7 +20,6 @@ import (
)
var (
- errInvalidTargetType = errors.New("bearer token defines non-container target override")
errBearerExpired = errors.New("bearer token has expired")
errBearerInvalidSignature = errors.New("bearer token has invalid signature")
errBearerInvalidContainerID = errors.New("bearer token was created for another container")
@@ -44,15 +43,12 @@ type CheckPrm struct {
// The request's bearer token. It is used in order to check APE overrides with the token.
BearerToken *bearer.Token
-
- // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
- SoftAPECheck bool
}
// CheckCore provides methods to perform the common logic of APE check.
type CheckCore interface {
// CheckAPE performs the common policy-engine check logic on a prepared request.
- CheckAPE(prm CheckPrm) error
+ CheckAPE(ctx context.Context, prm CheckPrm) error
}
type checkerCoreImpl struct {
@@ -74,22 +70,30 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora
}
// CheckAPE performs the common policy-engine check logic on a prepared request.
-func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
+func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
var cr policyengine.ChainRouter
- if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
+ if prm.BearerToken != nil {
var err error
if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
return fmt.Errorf("bearer validation error: %w", err)
}
- cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride())
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
+ if prm.BearerToken.Impersonate() {
+ cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
+ } else {
+ override, isSet := prm.BearerToken.APEOverride()
+ if !isSet {
+ return errors.New("expected override within bearer token")
+ }
+ cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override)
+ if err != nil {
+ return fmt.Errorf("create chain router error: %w", err)
+ }
}
} else {
cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
}
- groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey)
+ groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -104,17 +108,10 @@ func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
if err != nil {
return err
}
- if !found && prm.SoftAPECheck || status == apechain.Allow {
+ if found && status == apechain.Allow {
return nil
}
- err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String())
- return apeErr(err)
-}
-
-func apeErr(err error) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(err.Error())
- return errAccessDenied
+ return newChainRouterError(prm.Request.Operation(), status)
}
// isValidBearer checks whether bearer token was correctly signed by authorized
@@ -136,19 +133,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
}
// Check for ape overrides defined in the bearer token.
- apeOverride := token.APEOverride()
- if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
- return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
- }
-
- // Then check if container is either empty or equal to the container in the request.
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !cntID.Equals(targetCnr) {
- return errBearerInvalidContainerID
+ if apeOverride, isSet := token.APEOverride(); isSet {
+ switch apeOverride.Target.TargetType {
+ case ape.TargetTypeContainer:
+ var targetCnr cid.ID
+ err := targetCnr.DecodeString(apeOverride.Target.Name)
+ if err != nil {
+ return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
+ }
+ if !cntID.Equals(targetCnr) {
+ return errBearerInvalidContainerID
+ }
+ default:
+ }
}
// Then check if container owner signed this token.
@@ -160,8 +157,16 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
var usrSender user.ID
user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
+ // Then check if sender is valid. If it is an impersonated token, the sender is set to the token's issuer's
+ // public key, but not the actual sender.
+ if !token.Impersonate() {
+ if !token.AssertUser(usrSender) {
+ return errBearerInvalidOwner
+ }
+ } else {
+ if !bearer.ResolveIssuer(*token).Equals(usrSender) {
+ return errBearerInvalidOwner
+ }
}
return nil
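Two SDK contracts change here: `APEOverride()` now uses the comma-ok form, and impersonated tokens are verified against the resolved issuer rather than the asserted user. A small hypothetical helper built only on the accessors used in this diff (`Impersonate`, `APEOverride`, `ResolveIssuer`):

```go
package ape

import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"

// describeToken is a hypothetical sketch, not part of this diff.
func describeToken(t bearer.Token) string {
	if t.Impersonate() {
		return "impersonated, issuer " + bearer.ResolveIssuer(t).EncodeToString()
	}
	if _, ok := t.APEOverride(); ok {
		return "carries an APE override"
	}
	return "plain token"
}
```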
diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go
new file mode 100644
index 000000000..d3c381de7
--- /dev/null
+++ b/pkg/services/common/ape/error.go
@@ -0,0 +1,33 @@
+package ape
+
+import (
+ "fmt"
+
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+)
+
+// ChainRouterError is returned when chain router validation prevents
+// the APE request from being processed (no rule found, access denied, etc.).
+type ChainRouterError struct {
+ operation string
+ status apechain.Status
+}
+
+func (e *ChainRouterError) Error() string {
+ return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status())
+}
+
+func (e *ChainRouterError) Operation() string {
+ return e.operation
+}
+
+func (e *ChainRouterError) Status() apechain.Status {
+ return e.status
+}
+
+func newChainRouterError(operation string, status apechain.Status) *ChainRouterError {
+ return &ChainRouterError{
+ operation: operation,
+ status: status,
+ }
+}
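With a typed error in place, callers can branch on the router status via `errors.As` instead of matching message strings. A hypothetical sketch, assuming `apechain.NoRuleFound` from the policy-engine status set:

```go
package ape

import (
	"errors"

	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
)

// isNoRuleFound is a hypothetical helper: it reports whether err wraps a
// ChainRouterError produced by a NoRuleFound router status.
func isNoRuleFound(err error) bool {
	var chErr *ChainRouterError
	return errors.As(err, &chErr) && chErr.Status() == apechain.NoRuleFound
}
```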
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index d92ecf58b..3b5dab9aa 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -12,14 +12,14 @@ import (
"net"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -49,11 +49,11 @@ var (
)
type ir interface {
- InnerRingKeys() ([][]byte, error)
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
}
type containers interface {
- Get(cid.ID) (*containercore.Container, error)
+ Get(context.Context, cid.ID) (*containercore.Container, error)
}
type apeChecker struct {
@@ -106,7 +106,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -116,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
if err != nil {
return nil, err
}
@@ -126,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
}
}
- namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID())
+ namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("could not get owner namespace: %w", err)
}
- if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil {
+ if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
return nil, err
}
@@ -143,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
reqProps,
)
- groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -175,11 +175,84 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
return nil, apeErr(nativeschema.MethodListContainers, s)
}
+func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream")
+ defer span.End()
+
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ if err != nil {
+ return err
+ }
+
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
+ nativeschema.PropertyKeyActorRole: role,
+ }
+
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ if err != nil {
+ return err
+ }
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+
+ namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
+ if err != nil {
+ return fmt.Errorf("could not get owner namespace: %w", err)
+ }
+ if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
+ return err
+ }
+
+ request := aperequest.NewRequest(
+ nativeschema.MethodListContainers,
+ aperequest.NewResource(
+ resourceName(namespace, ""),
+ make(map[string]string),
+ ),
+ reqProps,
+ )
+
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ if err != nil {
+ return fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // Policy contract keeps group-related chains as namespace-group pairs.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetWithNamespace(namespace)
+ rt.User = &policyengine.Target{
+ Type: policyengine.User,
+ Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
+ }
+ rt.Groups = make([]policyengine.Target, len(groups))
+ for i := range groups {
+ rt.Groups[i] = policyengine.GroupTarget(groups[i])
+ }
+
+ s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
+ if err != nil {
+ return err
+ }
+
+ if found && s == apechain.Allow {
+ return ac.next.ListStream(req, stream)
+ }
+
+ return apeErr(nativeschema.MethodListContainers, s)
+}
+
func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -189,7 +262,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
if err != nil {
return nil, err
}
@@ -199,7 +272,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
}
}
- namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID())
+ namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("get namespace error: %w", err)
}
@@ -207,16 +280,21 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, err
}
+ cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer())
+ if err != nil {
+ return nil, fmt.Errorf("get container properties: %w", err)
+ }
+
request := aperequest.NewRequest(
nativeschema.MethodPutContainer,
aperequest.NewResource(
resourceName(namespace, ""),
- make(map[string]string),
+ cnrProps,
),
reqProps,
)
- groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -248,7 +326,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, apeErr(nativeschema.MethodPutContainer, s)
}
-func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
+func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
if vh == nil {
return "", nil, errMissingVerificationHeader
}
@@ -271,7 +349,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.R
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(pkBytes)
+ isIR, err := ac.isInnerRingKey(ctx, pkBytes)
if err != nil {
return "", nil, err
}
@@ -292,7 +370,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
return err
}
- cont, err := ac.reader.Get(id)
+ cont, err := ac.reader.Get(ctx, id)
if err != nil {
return err
}
@@ -308,7 +386,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
namespace = cntNamespace
}
- groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -322,7 +400,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
op,
aperequest.NewResource(
resourceName(namespace, id.EncodeToString()),
- ac.getContainerProps(cont),
+ getContainerProps(cont),
),
reqProps,
)
@@ -372,10 +450,26 @@ func resourceName(namespace string, container string) string {
return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
}
-func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string {
- return map[string]string{
+func getContainerProps(c *containercore.Container) map[string]string {
+ props := map[string]string{
nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
}
+ for attrName, attrVal := range c.Value.Attributes() {
+ name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName)
+ props[name] = attrVal
+ }
+ return props
+}
+
+func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) {
+ if cnrV2 == nil {
+ return nil, errors.New("container is not set")
+ }
+ c := cnrSDK.Container{}
+ if err := c.ReadFromV2(*cnrV2); err != nil {
+ return nil, err
+ }
+ return getContainerProps(&containercore.Container{Value: c}), nil
}
func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
@@ -385,7 +479,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
if err != nil {
return nil, nil, err
}
- role, err := ac.getRole(actor, pk, cont, cnrID)
+ role, err := ac.getRole(ctx, actor, pk, cont, cnrID)
if err != nil {
return nil, nil, err
}
@@ -393,7 +487,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
if err != nil {
return nil, nil, err
}
@@ -405,13 +499,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
return reqProps, pk, nil
}
-func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
+func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
if cont.Value.Owner().Equals(*actor) {
return nativeschema.PropertyValueContainerRoleOwner, nil
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(pkBytes)
+ isIR, err := ac.isInnerRingKey(ctx, pkBytes)
if err != nil {
return "", err
}
@@ -419,7 +513,7 @@ func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containe
return nativeschema.PropertyValueContainerRoleIR, nil
}
- isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont)
+ isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont)
if err != nil {
return "", err
}
@@ -513,8 +607,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) {
- innerRingKeys, err := ac.ir.InnerRingKeys()
+func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) {
+ innerRingKeys, err := ac.ir.InnerRingKeys(ctx)
if err != nil {
return false, err
}
@@ -528,50 +622,47 @@ func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) {
return false, nil
}
-func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
+func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
binCnrID := make([]byte, sha256.Size)
cnrID.Encode(binCnrID)
- nm, err := netmap.GetLatestNetworkMap(ac.nm)
+ nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm)
if err != nil {
return false, err
}
- in, err := isContainerNode(nm, pk, binCnrID, cont)
- if err != nil {
- return false, err
- } else if in {
+ if isContainerNode(nm, pk, binCnrID, cont) {
return true, nil
}
// then check previous netmap, this can happen in-between epoch change
// when node migrates data from last epoch container
- nm, err = netmap.GetPreviousNetworkMap(ac.nm)
+ nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm)
if err != nil {
return false, err
}
- return isContainerNode(nm, pk, binCnrID, cont)
+ return isContainerNode(nm, pk, binCnrID, cont), nil
}
-func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) (bool, error) {
- cnrVectors, err := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
- if err != nil {
- return false, err
- }
+func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
+ // It could be an error only if the network map doesn't have enough nodes to
+ // fulfill the policy. It's a logical error that doesn't affect actor role
+ // determination, so we ignore it.
+ cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
for i := range cnrVectors {
for j := range cnrVectors[i] {
if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
- return true, nil
+ return true
}
}
}
- return false, nil
+ return false
}
-func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -579,24 +670,19 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr, err := ownerSDK.ScriptHash()
- if err != nil {
- return "", err
- }
+ addr := ownerSDK.ScriptHash()
namespace := ""
- subject, err := ac.frostFSIDClient.GetSubject(addr)
+ subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
if err == nil {
namespace = subject.Namespace
- } else {
- if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
- return "", fmt.Errorf("get subject error: %w", err)
- }
+ } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
+ return "", fmt.Errorf("get subject error: %w", err)
}
return namespace, nil
}
-func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -604,11 +690,8 @@ func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error)
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr, err := ownerSDK.ScriptHash()
- if err != nil {
- return "", err
- }
- subject, err := ac.frostFSIDClient.GetSubject(addr)
+ addr := ownerSDK.ScriptHash()
+ subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
if err != nil {
return "", fmt.Errorf("get subject error: %w", err)
}
@@ -642,12 +725,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro
// validateNamespace validates if a namespace of a request actor equals to owner's namespace.
// An actor's namespace is calculated by a public key.
-func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error {
+func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- actorNamespace, err := ac.namespaceByOwner(actorOwnerID)
+ actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID)
if err != nil {
return fmt.Errorf("could not get actor namespace: %w", err)
}
@@ -658,11 +741,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNa
}
// fillWithUserClaimTags fills ape request properties with user claim tags, fetching them from the frostfsid contract by the actor's public key.
-func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
+func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index d6f9b75ef..6438c34ca 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -9,13 +9,13 @@ import (
"net"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -54,6 +54,8 @@ func TestAPE(t *testing.T) {
t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
+ t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr)
+ t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr)
}
const (
@@ -564,6 +566,185 @@ func testDenyGetContainerByIP(t *testing.T) {
require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
}
+func testDenyGetContainerSysZoneAttr(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindResource,
+ Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+ Value: "eggplant",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
+func testDenyPutContainerSysZoneAttr(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+ owner := testContainer.Owner()
+ ownerAddr := owner.ScriptHash()
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ ownerAddr: {},
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ ownerAddr: {},
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodPutContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ nativeschema.ResourceFormatRootContainers,
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindResource,
+ Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+ Value: "eggplant",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initPutRequest(t, testContainer)
+
+ resp, err := apeSrv.Put(ctxWithPeerInfo(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
func testDenyGetContainerByGroupID(t *testing.T) {
t.Parallel()
srv := &srvStub{
@@ -678,8 +859,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
testContainer := containertest.Container()
owner := testContainer.Owner()
- ownerAddr, err := owner.ScriptHash()
- require.NoError(t, err)
+ ownerAddr := owner.ScriptHash()
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
ownerAddr: {},
@@ -690,7 +870,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
nm.currentEpoch = 100
nm.netmaps = map[uint64]*netmap.NetMap{}
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
Rules: []chain.Rule{
{
Status: chain.AccessDenied,
@@ -773,7 +953,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -857,7 +1037,7 @@ func testDenyPutContainerInvalidNamespace(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -1079,6 +1259,11 @@ func (s *srvStub) List(context.Context, *container.ListRequest) (*container.List
return &container.ListResponse{}, nil
}
+func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error {
+ s.calls["ListStream"]++
+ return nil
+}
+
func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) {
s.calls["Put"]++
return &container.PutResponse{}, nil
@@ -1088,7 +1273,7 @@ type irStub struct {
keys [][]byte
}
-func (s *irStub) InnerRingKeys() ([][]byte, error) {
+func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
return s.keys, nil
}
@@ -1096,7 +1281,7 @@ type containerStub struct {
c map[cid.ID]*containercore.Container
}
-func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) {
+func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) {
if v, ok := s.c[id]; ok {
return v, nil
}
@@ -1108,21 +1293,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(s.currentEpoch - diff)
+ return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
}
-func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch() (uint64, error) {
+func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
return s.currentEpoch, nil
}
@@ -1131,7 +1316,7 @@ type frostfsidStub struct {
subjectsExt map[util.Uint160]*client.SubjectExtended
}
-func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) {
+func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) {
s, ok := f.subjects[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1139,7 +1324,7 @@ func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error)
return s, nil
}
-func (f *frostfsidStub) GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) {
s, ok := f.subjectsExt[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1527,26 +1712,21 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put
return req
}
-func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 {
+func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 {
var ownerSDK *user.ID
owner := testContainer.Owner()
ownerSDK = &owner
- sc, err := ownerSDK.ScriptHash()
- require.NoError(t, err)
- return sc
+ return ownerSDK.ScriptHash()
}
func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) {
var actorUserID user.ID
user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey()))
- var err error
- actorScriptHash, err = actorUserID.ScriptHash()
- require.NoError(t, err)
+ actorScriptHash = actorUserID.ScriptHash()
var ownerUserID user.ID
user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey()))
- ownerScriptHash, err = ownerUserID.ScriptHash()
- require.NoError(t, err)
+ ownerScriptHash = ownerUserID.ScriptHash()
require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String())
return
}
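
Both new tests install the same chain shape: a deny rule whose condition matches a container attribute. Distilled (reusing the test file's imports; only MethodGetContainer is shown, the Put variant differs in action and resource names):

```go
// Distilled from the two tests above: a deny rule conditioned on a
// container attribute. cnr is the encoded target container ID.
func denyByZoneAttr(cnr string) *chain.Chain {
	return &chain.Chain{
		Rules: []chain.Rule{{
			Status:  chain.AccessDenied,
			Actions: chain.Actions{Names: []string{nativeschema.MethodGetContainer}},
			Resources: chain.Resources{
				Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainer, cnr)},
			},
			Condition: []chain.Condition{{
				Kind:  chain.KindResource,
				Key:   fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
				Value: "eggplant",
				Op:    chain.CondStringEquals,
			}},
		}},
	}
}
```
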
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index b257272f5..b235efa3c 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -4,10 +4,10 @@ import (
"context"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- container_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ container_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -35,7 +35,7 @@ func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest)
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
@@ -47,7 +47,7 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_Get_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
@@ -58,18 +58,29 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_List_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
return res, err
}
+// ListStream implements Server.
+func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ err := a.next.ListStream(req, stream)
+ if !a.enabled.Load() {
+ return err
+ }
+ audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
+ return err
+}
+
// Put implements Server.
func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
res, err := a.next.Put(ctx, req)
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, container_grpc.ContainerService_Put_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req,
audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
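
Structurally, ListStream differs from the unary wrappers above only in where the context comes from: streaming handlers receive no ctx argument, so the audit call pulls it from the stream. The pattern in isolation (a sketch; names are illustrative):

```go
package audit

import (
	"context"
	"sync/atomic"
)

// serverStream is the minimal surface the audit layer needs.
type serverStream interface{ Context() context.Context }

// logStreamRequest mirrors auditService.ListStream above: run the
// handler first, then log using the context carried by the stream,
// since streaming handlers receive no ctx argument of their own.
func logStreamRequest(enabled *atomic.Bool, s serverStream,
	handle func() error, log func(ctx context.Context, ok bool),
) error {
	err := handle()
	if !enabled.Load() {
		return err
	}
	log(s.Context(), err == nil)
	return err
}
```
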
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index 0917e3bd0..cdd0d2514 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -4,9 +4,9 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type ServiceExecutor interface {
@@ -14,6 +14,7 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
+ ListStream(context.Context, *container.ListStreamRequest, ListStream) error
}
type executorSvc struct {
@@ -93,3 +94,11 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
s.respSvc.SetMeta(resp)
return resp, nil
}
+
+func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ err := s.exec.ListStream(stream.Context(), req, stream)
+ if err != nil {
+ return fmt.Errorf("could not execute ListStream request: %w", err)
+ }
+ return nil
+}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index 05d8749cf..eaa608eba 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -5,11 +5,11 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -25,20 +25,20 @@ type morphExecutor struct {
// Reader is an interface of read-only container storage.
type Reader interface {
containercore.Source
- containercore.EACLSource
	// ContainersOf returns a list of container identifiers belonging
	// to the specified user of the FrostFS system. Returns the identifiers
	// of all FrostFS containers if the pointer to the owner identifier is nil.
- ContainersOf(*user.ID) ([]cid.ID, error)
+ ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
+ IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
}
// Writer is an interface of container storage updater.
type Writer interface {
// Put stores specified container in the side chain.
- Put(containercore.Container) (*cid.ID, error)
+ Put(context.Context, containercore.Container) (*cid.ID, error)
// Delete removes specified container from the side chain.
- Delete(containercore.RemovalWitness) error
+ Delete(context.Context, containercore.RemovalWitness) error
}
func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
@@ -48,7 +48,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
+func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
sigV2 := body.GetSignature()
if sigV2 == nil {
// TODO(@cthulhu-rider): #468 use "const" error
@@ -81,7 +81,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
}
}
- idCnr, err := s.wrt.Put(cnr)
+ idCnr, err := s.wrt.Put(ctx, cnr)
if err != nil {
return nil, err
}
@@ -95,7 +95,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
return res, nil
}
-func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
+func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -125,7 +125,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *
rmWitness.Signature = body.GetSignature()
rmWitness.SessionToken = tok
- err = s.wrt.Delete(rmWitness)
+ err = s.wrt.Delete(ctx, rmWitness)
if err != nil {
return nil, err
}
@@ -133,7 +133,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *
return new(container.DeleteResponseBody), nil
}
-func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
+func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -146,7 +146,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- cnr, err := s.rdr.Get(id)
+ cnr, err := s.rdr.Get(ctx, id)
if err != nil {
return nil, err
}
@@ -173,7 +173,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (
return res, nil
}
-func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
+func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errMissingUserID
@@ -186,7 +186,7 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody)
return nil, fmt.Errorf("invalid user ID: %w", err)
}
- cnrs, err := s.rdr.ContainersOf(&id)
+ cnrs, err := s.rdr.ContainersOf(ctx, &id)
if err != nil {
return nil, err
}
@@ -201,3 +201,56 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody)
return res, nil
}
+
+func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error {
+ body := req.GetBody()
+ idV2 := body.GetOwnerID()
+ if idV2 == nil {
+ return errMissingUserID
+ }
+
+ var id user.ID
+
+ err := id.ReadFromV2(*idV2)
+ if err != nil {
+ return fmt.Errorf("invalid user ID: %w", err)
+ }
+
+ resBody := new(container.ListStreamResponseBody)
+ r := new(container.ListStreamResponse)
+ r.SetBody(resBody)
+
+ var cidList []refs.ContainerID
+
+	// Number of containers to send at once.
+ const batchSize = 1000
+
+ processCID := func(id cid.ID) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var refID refs.ContainerID
+ id.WriteToV2(&refID)
+ cidList = append(cidList, refID)
+ if len(cidList) == batchSize {
+ r.GetBody().SetContainerIDs(cidList)
+ cidList = cidList[:0]
+ return stream.Send(r)
+ }
+ return nil
+ }
+
+ if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil {
+ return err
+ }
+
+ if len(cidList) > 0 {
+ r.GetBody().SetContainerIDs(cidList)
+ return stream.Send(r)
+ }
+
+ return nil
+}
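
The new ListStream above is a batch-and-flush loop over IterateContainersOf: collect IDs, flush every batchSize, then flush the tail, checking ctx cancellation on every item. The same shape in isolation (a hedged sketch; the iterator and Send are abstracted as function values):

```go
package morph

import "context"

// batchStream flushes collected items every batchSize and once more
// at the end for the tail. iterate calls fn for every item, mirroring
// IterateContainersOf; send mirrors stream.Send.
func batchStream[T any](ctx context.Context, batchSize int,
	iterate func(fn func(T) error) error, send func([]T) error,
) error {
	batch := make([]T, 0, batchSize)
	err := iterate(func(item T) error {
		// Abort promptly if the request context is cancelled.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		batch = append(batch, item)
		if len(batch) == batchSize {
			if err := send(batch); err != nil {
				return err
			}
			batch = batch[:0]
		}
		return nil
	})
	if err != nil {
		return err
	}
	if len(batch) > 0 {
		return send(batch) // flush the final partial batch
	}
	return nil
}
```
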
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index 560c69232..1f6fdb0be 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -4,12 +4,12 @@ import (
"context"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
containerSvcMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test"
@@ -24,15 +24,11 @@ type mock struct {
containerSvcMorph.Reader
}
-func (m mock) Put(_ containerCore.Container) (*cid.ID, error) {
+func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) {
return new(cid.ID), nil
}
-func (m mock) Delete(_ containerCore.RemovalWitness) error {
- return nil
-}
-
-func (m mock) PutEACL(_ containerCore.EACL) error {
+func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error {
return nil
}
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index a19d83c56..d9208077d 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -3,7 +3,8 @@ package container
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
// Server is an interface of the FrostFS API Container service server.
@@ -12,4 +13,11 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
+ ListStream(*container.ListStreamRequest, ListStream) error
+}
+
+// ListStream is an interface of FrostFS API v2 compatible container list streamer.
+type ListStream interface {
+ util.ServerStream
+ Send(*container.ListStreamResponse) error
}
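
Anything satisfying util.ServerStream plus Send can serve as a ListStream, so a test double only needs to record what was sent. A hypothetical minimal implementation (assuming util.ServerStream amounts to Context() here, which matches how streams are used in this package):

```go
// recordingListStream is a hypothetical test double for ListStream.
type recordingListStream struct {
	ctx  context.Context
	sent []*container.ListStreamResponse
}

// Context satisfies the assumed util.ServerStream surface.
func (s *recordingListStream) Context() context.Context { return s.ctx }

// Send records every response for later assertions.
func (s *recordingListStream) Send(r *container.ListStreamResponse) error {
	s.sent = append(s.sent, r)
	return nil
}
```
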
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index f7f5d6486..85fe7ae87 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
type signService struct {
@@ -56,3 +56,40 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
return resp, s.sigSvc.SignResponse(resp, err)
}
+
+func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.ListStreamResponse)
+ _ = s.sigSvc.SignResponse(resp, err)
+ return stream.Send(resp)
+ }
+
+ ss := &listStreamSigner{
+ ListStream: stream,
+ sigSvc: s.sigSvc,
+ }
+ err := s.svc.ListStream(req, ss)
+ if err != nil || !ss.nonEmptyResp {
+ return ss.send(new(container.ListStreamResponse), err)
+ }
+ return nil
+}
+
+type listStreamSigner struct {
+ ListStream
+ sigSvc *util.SignService
+
+ nonEmptyResp bool // set on first Send call
+}
+
+func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error {
+ s.nonEmptyResp = true
+ return s.send(resp, nil)
+}
+
+func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error {
+ if err := s.sigSvc.SignResponse(resp, err); err != nil {
+ return err
+ }
+ return s.ListStream.Send(resp)
+}
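
The nonEmptyResp flag carries the at-least-one-response invariant: if the executor never calls Send, or fails before the first batch, signService.ListStream falls through and emits a single signed response carrying the status. Annotated, the tail of ListStream reads:

```go
err := s.svc.ListStream(req, ss) // ss flips nonEmptyResp on first Send
if err != nil || !ss.nonEmptyResp {
	// Handler failed, or succeeded without ever sending: emit one
	// signed (possibly empty) response so the client always receives
	// a signed status rather than a bare, empty stream.
	return ss.send(new(container.ListStreamResponse), err)
}
return nil
```
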
diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go
new file mode 100644
index 000000000..4f8708da7
--- /dev/null
+++ b/pkg/services/container/transport_splitter.go
@@ -0,0 +1,92 @@
+package container
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+)
+
+type (
+ TransportSplitter struct {
+ next Server
+
+ respSvc *response.Service
+ cnrAmount uint32
+ }
+
+ listStreamMsgSizeCtrl struct {
+ util.ServerStream
+ stream ListStream
+ respSvc *response.Service
+ cnrAmount uint32
+ }
+)
+
+func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server {
+ return &TransportSplitter{
+ next: next,
+ respSvc: respSvc,
+ cnrAmount: cnrAmount,
+ }
+}
+
+func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
+ return s.next.Put(ctx, req)
+}
+
+func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
+ return s.next.Delete(ctx, req)
+}
+
+func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
+ return s.next.Get(ctx, req)
+}
+
+func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
+ return s.next.List(ctx, req)
+}
+
+func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ return s.next.ListStream(req, &listStreamMsgSizeCtrl{
+ ServerStream: stream,
+ stream: stream,
+ respSvc: s.respSvc,
+ cnrAmount: s.cnrAmount,
+ })
+}
+
+func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error {
+ s.respSvc.SetMeta(resp)
+ body := resp.GetBody()
+ ids := body.GetContainerIDs()
+
+ var newResp *container.ListStreamResponse
+
+ for {
+ if newResp == nil {
+ newResp = new(container.ListStreamResponse)
+ newResp.SetBody(body)
+ }
+
+ cut := min(s.cnrAmount, uint32(len(ids)))
+
+ body.SetContainerIDs(ids[:cut])
+ newResp.SetMetaHeader(resp.GetMetaHeader())
+ newResp.SetVerificationHeader(resp.GetVerificationHeader())
+
+ if err := s.stream.Send(newResp); err != nil {
+ return fmt.Errorf("TransportSplitter: %w", err)
+ }
+
+ ids = ids[cut:]
+
+ if len(ids) == 0 {
+ break
+ }
+ }
+
+ return nil
+}
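
listStreamMsgSizeCtrl.Send is pure chunking: one incoming response holding N container IDs becomes ceil(N/cnrAmount) outgoing responses. The slicing in isolation (a sketch; note that, like the original loop, it emits one empty piece when the input is empty):

```go
// chunk splits ids into pieces of at most size elements, mirroring
// the Send loop above. When ids is empty it still yields one empty
// piece, because the loop body runs before the length check.
func chunk[T any](ids []T, size int) [][]T {
	var out [][]T
	for {
		cut := min(size, len(ids))
		out = append(out, ids[:cut])
		ids = ids[cut:]
		if len(ids) == 0 {
			break
		}
	}
	return out
}
```
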
diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go
index fd6f020d1..37daf67be 100644
--- a/pkg/services/control/convert.go
+++ b/pkg/services/control/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
)
type requestWrapper struct {
diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go
index c892c5b6c..024676b87 100644
--- a/pkg/services/control/ir/convert.go
+++ b/pkg/services/control/ir/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
)
type requestWrapper struct {
diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go
index 0c9400f6c..62f800d99 100644
--- a/pkg/services/control/ir/rpc.go
+++ b/pkg/services/control/ir/rpc.go
@@ -1,9 +1,9 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
)
const serviceName = "ircontrol.ControlService"
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
index 9f7a8b879..d9f65a2fc 100644
--- a/pkg/services/control/ir/server/audit.go
+++ b/pkg/services/control/ir/server/audit.go
@@ -6,10 +6,10 @@ import (
"strings"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -36,7 +36,7 @@ func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheck
if !a.enabled.Load() {
return res, err
}
- audit.LogRequestWithKey(a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
return res, err
}
@@ -79,7 +79,7 @@ func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveC
}
}
- audit.LogRequestWithKey(a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
return res, err
}
@@ -90,7 +90,7 @@ func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRe
return res, err
}
- audit.LogRequestWithKey(a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
return res, err
}
@@ -102,7 +102,7 @@ func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequ
return res, err
}
- audit.LogRequestWithKey(a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
nil, err == nil)
return res, err
}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 2447a8a74..0509d2646 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -5,10 +5,10 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"google.golang.org/grpc/codes"
@@ -40,7 +40,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
// TickEpoch forces a new epoch.
//
// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -48,12 +48,12 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c
resp := new(control.TickEpochResponse)
resp.SetBody(new(control.TickEpochResponse_Body))
- epoch, err := s.netmapClient.Epoch()
+ epoch, err := s.netmapClient.Epoch(ctx)
if err != nil {
return nil, fmt.Errorf("getting current epoch: %w", err)
}
- vub, err := s.netmapClient.NewEpochControl(epoch+1, req.GetBody().GetVub())
+ vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing new epoch: %w", err)
}
@@ -69,7 +69,7 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c
// RemoveNode forces a node removal.
//
// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -77,7 +77,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
resp := new(control.RemoveNodeResponse)
resp.SetBody(new(control.RemoveNodeResponse_Body))
- nm, err := s.netmapClient.NetMap()
+ nm, err := s.netmapClient.NetMap(ctx)
if err != nil {
return nil, fmt.Errorf("getting netmap: %w", err)
}
@@ -91,11 +91,11 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
if len(nodeInfo.PublicKey()) == 0 {
return nil, status.Error(codes.NotFound, "no such node")
}
- if nodeInfo.IsOffline() {
+ if nodeInfo.Status().IsOffline() {
return nil, status.Error(codes.FailedPrecondition, "node is already offline")
}
- vub, err := s.netmapClient.ForceRemovePeer(nodeInfo, req.GetBody().GetVub())
+ vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing node removal: %w", err)
}
@@ -109,7 +109,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
}
// RemoveContainer forces a container removal.
-func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
+func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -124,7 +124,7 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
}
var err error
- vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -138,13 +138,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
}
- cids, err := s.containerClient.ContainersOf(&owner)
+ cids, err := s.containerClient.ContainersOf(ctx, &owner)
if err != nil {
return nil, fmt.Errorf("failed to get owner's containers: %w", err)
}
for _, containerID := range cids {
- vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -162,13 +162,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
return resp, nil
}
-func (s *Server) removeContainer(containerID cid.ID, vub uint32) (uint32, error) {
+func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
var prm container.DeletePrm
prm.SetCID(containerID[:])
prm.SetControlTX(true)
prm.SetVUB(vub)
- vub, err := s.containerClient.Delete(prm)
+ vub, err := s.containerClient.Delete(ctx, prm)
if err != nil {
return 0, fmt.Errorf("forcing container removal: %w", err)
}
diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go
index 0c2de5300..9d5cfefc8 100644
--- a/pkg/services/control/ir/server/deps.go
+++ b/pkg/services/control/ir/server/deps.go
@@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current health status of the IR application.
+	// HealthStatus must calculate and return the current health status of the IR application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
index c2a4f88a6..0cfca71c1 100644
--- a/pkg/services/control/ir/server/server.go
+++ b/pkg/services/control/ir/server/server.go
@@ -35,8 +35,7 @@ func panicOnPrmValue(n string, v any) {
// the parameterized private key.
func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server {
// verify required parameters
- switch {
- case prm.healthChecker == nil:
+ if prm.healthChecker == nil {
panicOnPrmValue("health checker", prm.healthChecker)
}
diff --git a/pkg/services/control/ir/server/sign.go b/pkg/services/control/ir/server/sign.go
index f72d51f9e..d39f6d5f9 100644
--- a/pkg/services/control/ir/server/sign.go
+++ b/pkg/services/control/ir/server/sign.go
@@ -6,8 +6,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go
index 66d196617..d27746263 100644
--- a/pkg/services/control/ir/service_frostfs.pb.go
+++ b/pkg/services/control/ir/service_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -233,14 +233,25 @@ func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -375,11 +386,22 @@ func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"healthStatus\":"
- out.RawString(prefix[1:])
- out.Int32(int32(x.HealthStatus))
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"healthStatus\":"
+ out.RawString(prefix)
+ v := int32(x.HealthStatus)
+ if vv, ok := HealthStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
out.RawByte('}')
}
@@ -564,14 +586,25 @@ func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -706,10 +739,16 @@ func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"vub\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -743,7 +782,15 @@ func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Vub = f
}
}
@@ -879,14 +926,25 @@ func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1021,10 +1079,16 @@ func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"vub\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -1058,7 +1122,15 @@ func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Vub = f
}
}
@@ -1194,14 +1266,25 @@ func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1356,14 +1439,29 @@ func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"key\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Key)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"vub\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
out.RawString(prefix)
out.Uint32(x.Vub)
}
@@ -1398,13 +1496,27 @@ func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Key = f
}
case "vub":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Vub = f
}
}
@@ -1540,14 +1652,25 @@ func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1682,10 +1805,16 @@ func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"vub\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -1719,7 +1848,15 @@ func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Vub = f
}
}
@@ -1855,14 +1992,25 @@ func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2037,19 +2185,43 @@ func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
- }
- {
- const prefix string = ",\"owner\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
out.RawString(prefix)
- out.Base64Bytes(x.Owner)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"vub\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"owner\":"
+ out.RawString(prefix)
+ if x.Owner != nil {
+ out.Base64Bytes(x.Owner)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
out.RawString(prefix)
out.Uint32(x.Vub)
}
@@ -2084,19 +2256,39 @@ func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "owner":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Owner = f
}
case "vub":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Vub = f
}
}
@@ -2232,14 +2424,25 @@ func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2374,10 +2577,16 @@ func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"vub\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -2411,7 +2620,15 @@ func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Vub = f
}
}
@@ -2547,14 +2764,25 @@ func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go
index b230726a9..407eec6ad 100644
--- a/pkg/services/control/ir/types_frostfs.pb.go
+++ b/pkg/services/control/ir/types_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -155,16 +155,35 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"key\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Key)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
- out.Base64Bytes(x.Sign)
+ if x.Sign != nil {
+ out.Base64Bytes(x.Sign)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -197,13 +216,25 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Key = f
}
case "signature":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Sign = f
}
}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 80aece008..0c4236d0e 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -1,8 +1,10 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
)
const serviceName = "control.ControlService"
@@ -15,7 +17,6 @@ const (
rpcListShards = "ListShards"
rpcSetShardMode = "SetShardMode"
rpcSynchronizeTree = "SynchronizeTree"
- rpcEvacuateShard = "EvacuateShard"
rpcStartShardEvacuation = "StartShardEvacuation"
rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus"
@@ -31,6 +32,7 @@ const (
rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
rpcDetachShards = "DetachShards"
rpcStartShardRebuild = "StartShardRebuild"
+ rpcListShardsForObject = "ListShardsForObject"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -74,6 +76,7 @@ func SetNetmapStatus(
// GetNetmapStatus executes ControlService.GetNetmapStatus RPC.
func GetNetmapStatus(
+ _ context.Context,
cli *client.Client,
req *GetNetmapStatusRequest,
opts ...client.CallOption,
@@ -162,19 +165,6 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl
return wResp.message, nil
}
-// EvacuateShard executes ControlService.EvacuateShard RPC.
-func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) {
- wResp := newResponseWrapper[EvacuateShardResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
// StartShardEvacuation executes ControlService.StartShardEvacuation RPC.
func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) {
wResp := newResponseWrapper[StartShardEvacuationResponse]()
@@ -292,7 +282,7 @@ func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverride
return wResp.message, nil
}
-// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
+// GetChainLocalOverride executes ControlService.GetChainLocalOverride RPC.
func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) {
wResp := newResponseWrapper[GetChainLocalOverrideResponse]()
wReq := &requestWrapper{m: req}
@@ -375,3 +365,22 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts .
return wResp.message, nil
}
+
+// ListShardsForObject executes ControlService.ListShardsForObject RPC.
+func ListShardsForObject(
+ cli *client.Client,
+ req *ListShardsForObjectRequest,
+ opts ...client.CallOption,
+) (*ListShardsForObjectResponse, error) {
+ wResp := newResponseWrapper[ListShardsForObjectResponse]()
+
+ wReq := &requestWrapper{
+ m: req,
+ }
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
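
The new wrapper follows the same unary call shape as the other control RPCs. Below is a hedged usage sketch: it assumes an already-established control-service `client.Client`, a request signed by the caller, and a `GetShard_ID` getter on the response body symmetric to the request side (not shown in this diff).

```go
// Sketch only: client setup and request signing are elided.
func shardsForObject(cli *client.Client, req *control.ListShardsForObjectRequest) ([][]byte, error) {
	resp, err := control.ListShardsForObject(cli, req)
	if err != nil {
		return nil, err
	}
	return resp.GetBody().GetShard_ID(), nil
}
```
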
diff --git a/pkg/services/control/server/ctrlmessage/sign.go b/pkg/services/control/server/ctrlmessage/sign.go
index 31425b337..d9d5c5f5e 100644
--- a/pkg/services/control/server/ctrlmessage/sign.go
+++ b/pkg/services/control/server/ctrlmessage/sign.go
@@ -4,8 +4,8 @@ import (
"crypto/ecdsa"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go
index a4111bddb..ffd36962b 100644
--- a/pkg/services/control/server/detach_shards.go
+++ b/pkg/services/control/server/detach_shards.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
+func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -19,7 +19,7 @@ func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsReques
shardIDs := s.getShardIDList(req.GetBody().GetShard_ID())
- if err := s.s.DetachShards(shardIDs); err != nil {
+ if err := s.s.DetachShards(ctx, shardIDs); err != nil {
if errors.As(err, new(logicerr.Logical)) {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
deleted file mode 100644
index ae3413373..000000000
--- a/pkg/services/control/server/evacuate.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package control
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/hex"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
-
-func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- prm := engine.EvacuateShardPrm{
- ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- ObjectsHandler: s.replicateObject,
- Scope: engine.EvacuateScopeObjects,
- }
-
- res, err := s.s.Evacuate(ctx, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := &control.EvacuateShardResponse{
- Body: &control.EvacuateShardResponse_Body{
- Count: uint32(res.ObjectsEvacuated()),
- },
- }
-
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- cid, ok := obj.ContainerID()
- if !ok {
- // Return nil to prevent situations where a shard can't be evacuated
- // because of a single bad/corrupted object.
- return false, nil
- }
-
- nodes, err := s.getContainerNodes(cid)
- if err != nil {
- return false, err
- }
-
- if len(nodes) == 0 {
- return false, nil
- }
-
- var res replicatorResult
- task := replicator.Task{
- NumCopies: 1,
- Addr: addr,
- Obj: obj,
- Nodes: nodes,
- }
- s.replicator.HandleReplicationTask(ctx, task, &res)
-
- if res.count == 0 {
- return false, errors.New("object was not replicated")
- }
- return true, nil
-}
-
-func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
- nodes, err := s.getContainerNodes(contID)
- if err != nil {
- return false, "", err
- }
- if len(nodes) == 0 {
- return false, "", nil
- }
-
- for _, node := range nodes {
- err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
- if err == nil {
- return true, hex.EncodeToString(node.PublicKey()), nil
- }
- }
- return false, "", err
-}
-
-func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
- rawCID := make([]byte, sha256.Size)
- contID.Encode(rawCID)
-
- var height uint64
- for {
- op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
- if err != nil {
- return err
- }
-
- if op.Time == 0 {
- return nil
- }
-
- req := &tree.ApplyRequest{
- Body: &tree.ApplyRequest_Body{
- ContainerId: rawCID,
- TreeId: treeID,
- Operation: &tree.LogMove{
- ParentId: op.Parent,
- Meta: op.Meta.Bytes(),
- ChildId: op.Child,
- },
- },
- }
-
- err = tree.SignMessage(req, s.key)
- if err != nil {
- return fmt.Errorf("can't message apply request: %w", err)
- }
-
- err = s.treeService.ReplicateTreeOp(ctx, node, req)
- if err != nil {
- return err
- }
-
- height = op.Time + 1
- }
-}
-
-func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
- nm, err := s.netMapSrc.GetNetMap(0)
- if err != nil {
- return nil, err
- }
-
- c, err := s.cnrSrc.Get(contID)
- if err != nil {
- return nil, err
- }
-
- binCnr := make([]byte, sha256.Size)
- contID.Encode(binCnr)
-
- ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
- if err != nil {
- return nil, errFailedToBuildListOfContainerNodes
- }
-
- nodes := placement.FlattenNodes(ns)
- bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
- if bytes.Equal(nodes[i].PublicKey(), bs) {
- copy(nodes[i:], nodes[i+1:])
- nodes = nodes[:len(nodes)-1]
- }
- }
- return nodes, nil
-}
-
-type replicatorResult struct {
- count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
- r.count++
-}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index aacebe9e3..f3ba9015e 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -1,17 +1,32 @@
package control
import (
+ "bytes"
"context"
+ "crypto/sha256"
+ "encoding/hex"
"errors"
+ "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
+var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
+
func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) {
err := s.isValidRequest(req)
if err != nil {
@@ -23,16 +38,17 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
}
prm := engine.EvacuateShardPrm{
- ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- ObjectsHandler: s.replicateObject,
- TreeHandler: s.replicateTree,
- Async: true,
- Scope: engine.EvacuateScope(req.GetBody().GetScope()),
+ ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ ObjectsHandler: s.replicateObject,
+ TreeHandler: s.replicateTree,
+ Scope: engine.EvacuateScope(req.GetBody().GetScope()),
+ ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
+ ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
+ RepOneOnly: req.GetBody().GetRepOneOnly(),
}
- _, err = s.s.Evacuate(ctx, prm)
- if err != nil {
+ if err = s.s.Evacuate(ctx, prm); err != nil {
var logicalErr logicerr.Logical
if errors.As(err, &logicalErr) {
return nil, status.Error(codes.Aborted, err.Error())
@@ -132,3 +148,133 @@ func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.Re
}
return resp, nil
}
+
+func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ cid, ok := obj.ContainerID()
+ if !ok {
+ // Return nil to prevent situations where a shard can't be evacuated
+ // because of a single bad/corrupted object.
+ return false, nil
+ }
+
+ nodes, err := s.getContainerNodes(ctx, cid)
+ if err != nil {
+ return false, err
+ }
+
+ if len(nodes) == 0 {
+ return false, nil
+ }
+
+ var res replicatorResult
+ task := replicator.Task{
+ NumCopies: 1,
+ Addr: addr,
+ Obj: obj,
+ Nodes: nodes,
+ }
+ s.replicator.HandleReplicationTask(ctx, task, &res)
+
+ if res.count == 0 {
+ return false, errors.New("object was not replicated")
+ }
+ return true, nil
+}
+
+func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
+ nodes, err := s.getContainerNodes(ctx, contID)
+ if err != nil {
+ return false, "", err
+ }
+ if len(nodes) == 0 {
+ return false, "", nil
+ }
+
+ for _, node := range nodes {
+ err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
+ if err == nil {
+ return true, hex.EncodeToString(node.PublicKey()), nil
+ }
+ }
+ return false, "", err
+}
+
+func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
+ rawCID := make([]byte, sha256.Size)
+ contID.Encode(rawCID)
+
+ var height uint64
+ for {
+ op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
+ if err != nil {
+ return err
+ }
+
+ if op.Time == 0 {
+ return nil
+ }
+
+ req := &tree.ApplyRequest{
+ Body: &tree.ApplyRequest_Body{
+ ContainerId: rawCID,
+ TreeId: treeID,
+ Operation: &tree.LogMove{
+ ParentId: op.Parent,
+ Meta: op.Meta.Bytes(),
+ ChildId: op.Child,
+ },
+ },
+ }
+
+ err = tree.SignMessage(req, s.key)
+ if err != nil {
+ return fmt.Errorf("can't message apply request: %w", err)
+ }
+
+ err = s.treeService.ReplicateTreeOp(ctx, node, req)
+ if err != nil {
+ return err
+ }
+
+ height = op.Time + 1
+ }
+}
+
+func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) {
+ nm, err := s.netMapSrc.GetNetMap(ctx, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := s.cnrSrc.Get(ctx, contID)
+ if err != nil {
+ return nil, err
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ contID.Encode(binCnr)
+
+ ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
+ if err != nil {
+ return nil, errFailedToBuildListOfContainerNodes
+ }
+
+ nodes := placement.FlattenNodes(ns)
+ bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
+ for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
+ if bytes.Equal(nodes[i].PublicKey(), bs) {
+ copy(nodes[i:], nodes[i+1:])
+ nodes = nodes[:len(nodes)-1]
+ }
+ }
+ return nodes, nil
+}
+
+type replicatorResult struct {
+ count int
+}
+
+// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
+ r.count++
+}
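
`getContainerNodes` filters the local node out of the flattened placement list with `copy` plus truncation rather than a `range` loop, because the slice shrinks while it is being walked. A standalone sketch of the idiom; the index decrement is our addition to handle adjacent duplicates, which the node code omits.

```go
package main

import (
	"bytes"
	"fmt"
)

// dropKey removes every element equal to self from nodes in place.
// An index loop is required: a range loop captures the original
// length and would walk past the truncated tail.
func dropKey(nodes [][]byte, self []byte) [][]byte {
	for i := 0; i < len(nodes); i++ {
		if bytes.Equal(nodes[i], self) {
			copy(nodes[i:], nodes[i+1:])
			nodes = nodes[:len(nodes)-1]
			i-- // re-check the element shifted into slot i
		}
	}
	return nodes
}

func main() {
	nodes := [][]byte{[]byte("n1"), []byte("self"), []byte("n2")}
	fmt.Printf("%s\n", dropKey(nodes, []byte("self"))) // [n1 n2]
}
```
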
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index d9fefc38e..a8ef7809e 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -42,8 +42,7 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
prm.WithForceRemoval()
prm.WithAddress(addrList[i])
- _, err := s.s.Delete(ctx, prm)
- if err != nil && firstErr == nil {
+ if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
firstErr = err
}
}
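
The DropObjects loop above keeps only the first error while still visiting every address, so a single bad address cannot mask the rest of the batch. The same pattern in isolation, as a hypothetical generic helper that is not part of the node code:

```go
// firstError applies fn to each item and returns the first error seen,
// without stopping at the first failure.
func firstError[T any](items []T, fn func(T) error) error {
	var firstErr error
	for _, it := range items {
		if err := fn(it); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
```
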
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
index 1c038253a..5e0496910 100644
--- a/pkg/services/control/server/get_netmap_status.go
+++ b/pkg/services/control/server/get_netmap_status.go
@@ -10,12 +10,12 @@ import (
)
// GetNetmapStatus gets node status in FrostFS network.
-func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
+func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
- st, epoch, err := s.nodeState.GetNetmapStatus()
+ st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go
new file mode 100644
index 000000000..39565ed50
--- /dev/null
+++ b/pkg/services/control/server/list_shards_for_object.go
@@ -0,0 +1,65 @@
+package control
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ var obj oid.ID
+ err = obj.DecodeString(req.GetBody().GetObjectId())
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ var cnr cid.ID
+ err = cnr.DecodeString(req.GetBody().GetContainerId())
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ resp := new(control.ListShardsForObjectResponse)
+ body := new(control.ListShardsForObjectResponse_Body)
+ resp.SetBody(body)
+
+ var objAddr oid.Address
+ objAddr.SetContainer(cnr)
+ objAddr.SetObject(obj)
+ info, err := s.s.ListShardsForObject(ctx, objAddr)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ if len(info) == 0 {
+ return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject)
+ }
+
+ body.SetShard_ID(shardInfoToProto(info))
+
+ // Sign the response
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func shardInfoToProto(infos []shard.Info) [][]byte {
+ shardInfos := make([][]byte, 0, len(infos))
+ for _, info := range infos {
+ shardInfos = append(shardInfos, *info.ID)
+ }
+
+ return shardInfos
+}
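
The handler validates both identifiers before touching the engine, mapping each decode failure to `codes.InvalidArgument`. A self-contained sketch of the same address construction using the SDK ID types; the helper is ours, for illustration only.

```go
package addrutil

import (
	"fmt"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// parseAddress builds an oid.Address from string-encoded container and
// object IDs, mirroring the decoding done by ListShardsForObject.
func parseAddress(cnrStr, objStr string) (oid.Address, error) {
	var addr oid.Address

	var obj oid.ID
	if err := obj.DecodeString(objStr); err != nil {
		return addr, fmt.Errorf("invalid object ID: %w", err)
	}

	var cnr cid.ID
	if err := cnr.DecodeString(cnrStr); err != nil {
		return addr, fmt.Errorf("invalid container ID: %w", err)
	}

	addr.SetContainer(cnr)
	addr.SetObject(obj)
	return addr, nil
}
```
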
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index f3fe56a46..59d701bc6 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -1,6 +1,7 @@
package control
import (
+ "context"
"crypto/ecdsa"
"sync/atomic"
@@ -26,13 +27,13 @@ type Server struct {
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current status of the node in FrostFS network map.
+ // NetmapStatus must calculate and return current status of the node in FrostFS network map.
//
// If status can not be calculated for any reason,
// control.netmapStatus_STATUS_UNDEFINED should be returned.
NetmapStatus() control.NetmapStatus
- // Must calculate and return current health status of the node application.
+ // HealthStatus must calculate and return current health status of the node application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
@@ -45,13 +46,13 @@ type NodeState interface {
//
// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
// in the network settings, the node additionally starts local maintenance.
- SetNetmapStatus(st control.NetmapStatus) error
+ SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
// but starts local maintenance regardless of the network settings.
- ForceMaintenance() error
+ ForceMaintenance(ctx context.Context) error
- GetNetmapStatus() (control.NetmapStatus, uint64, error)
+ GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
}
// LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine
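
With context now threaded through NodeState, every implementation, including test doubles, must accept it. A hypothetical stub satisfying the updated interface:

```go
// noopNodeState is a hypothetical test double for the updated
// NodeState interface; it only stores the last status in memory.
type noopNodeState struct {
	status control.NetmapStatus
	epoch  uint64
}

func (s *noopNodeState) SetNetmapStatus(_ context.Context, st control.NetmapStatus) error {
	s.status = st
	return nil
}

func (s *noopNodeState) ForceMaintenance(_ context.Context) error {
	s.status = control.NetmapStatus_MAINTENANCE
	return nil
}

func (s *noopNodeState) GetNetmapStatus(_ context.Context) (control.NetmapStatus, uint64, error) {
	return s.status, s.epoch, nil
}
```
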
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
index 3fd69df12..529041dca 100644
--- a/pkg/services/control/server/set_netmap_status.go
+++ b/pkg/services/control/server/set_netmap_status.go
@@ -12,7 +12,7 @@ import (
// SetNetmapStatus sets node status in FrostFS network.
//
// If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
+func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
// verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -29,9 +29,9 @@ func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatus
"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
}
- err = s.nodeState.ForceMaintenance()
+ err = s.nodeState.ForceMaintenance(ctx)
} else {
- err = s.nodeState.SetNetmapStatus(st)
+ err = s.nodeState.SetNetmapStatus(ctx, st)
}
if err != nil {
diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go
index 52835c41d..4f8796263 100644
--- a/pkg/services/control/server/set_shard_mode.go
+++ b/pkg/services/control/server/set_shard_mode.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
+func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
// verify request
err := s.isValidRequest(req)
if err != nil {
@@ -38,7 +38,7 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques
}
for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
- err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter())
+ err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/sign.go b/pkg/services/control/server/sign.go
index 514af273f..0e8e24b6e 100644
--- a/pkg/services/control/server/sign.go
+++ b/pkg/services/control/server/sign.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
)
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 04994328a..4c539acfc 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -30,11 +30,6 @@ service ControlService {
// Synchronizes all log operations for the specified tree.
rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
- // EvacuateShard moves all data from one shard to the others.
- // Deprecated: Use
- // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
- rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse);
-
// StartShardEvacuation starts moving all data from one shard to the others.
rpc StartShardEvacuation(StartShardEvacuationRequest)
returns (StartShardEvacuationResponse);
@@ -94,6 +89,9 @@ service ControlService {
// StartShardRebuild starts shard rebuild process.
rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
+
+ // ListShardsForObject returns info about the shards where the object is stored.
+ rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
}
// Health check request.
@@ -394,6 +392,12 @@ message StartShardEvacuationRequest {
bool ignore_errors = 2;
// Evacuation scope.
uint32 scope = 3;
+ // Count of concurrent container evacuation workers.
+ uint32 container_worker_count = 4;
+ // Count of concurrent object evacuation workers.
+ uint32 object_worker_count = 5;
+ // Choose for evacuation objects in `REP 1` containers only.
+ bool rep_one_only = 6;
}
Body body = 1;
@@ -728,3 +732,23 @@ message StartShardRebuildResponse {
Signature signature = 2;
}
+
+message ListShardsForObjectRequest {
+ message Body {
+ string object_id = 1;
+ string container_id = 2;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message ListShardsForObjectResponse {
+ message Body {
+ // List of the node's shards storing the object.
+ repeated bytes shard_ID = 1;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
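
The three new body fields let the caller tune evacuation concurrency and restrict it to `REP 1` containers; zero values keep the engine defaults, since `EmitProtobuf` omits unset fields from the wire. Below is a hedged sketch of filling them through the generated setters; `shardID` is a placeholder, and we assume `SetShard_ID`, `SetIgnoreErrors`, and `SetBody` follow the pattern visible elsewhere in this file.

```go
// Sketch: assembling a StartShardEvacuation request body with the new
// tuning fields; shardID is a placeholder value.
body := new(control.StartShardEvacuationRequest_Body)
body.SetShard_ID([][]byte{shardID})
body.SetIgnoreErrors(true)
body.SetContainerWorkerCount(4) // concurrent container workers
body.SetObjectWorkerCount(16)   // concurrent object workers
body.SetRepOneOnly(true)        // evacuate only objects from REP 1 containers

req := new(control.StartShardEvacuationRequest)
req.SetBody(body)
```
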
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 019cac290..44849d591 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -233,14 +233,25 @@ func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
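
Every regenerated MarshalEasyJSON now drives field separators with an explicit `first` flag instead of the old leading-comma trick (`prefix[1:]` on the first field), which stays correct if field blocks ever become conditionally omitted. The pattern distilled, assuming the same `jwriter.Writer` imported above:

```go
// writeObject shows the separator pattern used by the regenerated
// marshalers: a first flag decides whether to emit a comma, so field
// blocks can be reordered or skipped without breaking the JSON.
func writeObject(out *jwriter.Writer, fields [][2]string) {
	first := true
	out.RawByte('{')
	for _, kv := range fields {
		if !first {
			out.RawByte(',')
		} else {
			first = false
		}
		out.String(kv[0]) // key, quoted by jwriter
		out.RawByte(':')
		out.String(kv[1]) // value, quoted by jwriter
	}
	out.RawByte('}')
}
```
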
@@ -395,16 +406,37 @@ func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"netmapStatus\":"
- out.RawString(prefix[1:])
- out.Int32(int32(x.NetmapStatus))
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"netmapStatus\":"
+ out.RawString(prefix)
+ v := int32(x.NetmapStatus)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
{
- const prefix string = ",\"healthStatus\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"healthStatus\":"
out.RawString(prefix)
- out.Int32(int32(x.HealthStatus))
+ v := int32(x.HealthStatus)
+ if vv, ok := HealthStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
out.RawByte('}')
}
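
Enum fields are now emitted by symbolic name when the value is present in the generated `_name` map, with a numeric fallback so unknown values from newer peers still round-trip. The lookup in isolation:

```go
// writeEnum mirrors the regenerated enum encoding: the symbolic name
// when known, otherwise the raw numeric value.
func writeEnum(out *jwriter.Writer, v int32, names map[int32]string) {
	if name, ok := names[v]; ok {
		out.String(name)
	} else {
		out.Int32(v)
	}
}
```
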
@@ -611,14 +643,25 @@ func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -773,14 +816,30 @@ func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"status\":"
- out.RawString(prefix[1:])
- out.Int32(int32(x.Status))
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"status\":"
+ out.RawString(prefix)
+ v := int32(x.Status)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
{
- const prefix string = ",\"forceMaintenance\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"forceMaintenance\":"
out.RawString(prefix)
out.Bool(x.ForceMaintenance)
}
@@ -973,14 +1032,25 @@ func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1254,14 +1324,25 @@ func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1535,14 +1616,25 @@ func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1697,16 +1789,34 @@ func (x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"status\":"
- out.RawString(prefix[1:])
- out.Int32(int32(x.Status))
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"status\":"
+ out.RawString(prefix)
+ v := int32(x.Status)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
{
- const prefix string = ",\"epoch\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"epoch\":"
out.RawString(prefix)
- out.Uint64(x.Epoch)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -1761,7 +1871,15 @@ func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "epoch":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.Epoch = f
}
}
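
64-bit integers such as `epoch` are now written as quoted decimal strings and parsed back with `strconv`, because values above 2^53 are not exactly representable by consumers that store JSON numbers as IEEE-754 doubles. A small round-trip demonstration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// 2^53+1 cannot be represented as a float64; a bare JSON number
	// would silently lose the low bit in such consumers.
	const epoch uint64 = 1<<53 + 1

	// Encode as the regenerated marshaler does: quoted decimal.
	buf := append([]byte{'"'}, strconv.AppendUint(nil, epoch, 10)...)
	buf = append(buf, '"')
	fmt.Println(string(buf)) // "9007199254740993"

	// Decode as the regenerated unmarshaler does.
	v, err := strconv.ParseUint(string(buf[1:len(buf)-1]), 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(v == epoch) // true
}
```
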
@@ -1897,14 +2015,25 @@ func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2039,16 +2168,26 @@ func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"addressList\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"addressList\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.AddressList {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.AddressList[i])
+ if x.AddressList[i] != nil {
+ out.Base64Bytes(x.AddressList[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
@@ -2086,7 +2225,13 @@ func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -2226,14 +2371,25 @@ func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2507,14 +2663,25 @@ func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2788,14 +2955,25 @@ func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2936,10 +3114,16 @@ func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shards\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shards\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shards {
if i != 0 {
@@ -3124,14 +3308,25 @@ func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3306,26 +3501,51 @@ func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
{
- const prefix string = ",\"mode\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"mode\":"
out.RawString(prefix)
- out.Int32(int32(x.Mode))
+ v := int32(x.Mode)
+ if vv, ok := ShardMode_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
{
- const prefix string = ",\"resetErrorCounter\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"resetErrorCounter\":"
out.RawString(prefix)
out.Bool(x.ResetErrorCounter)
}
@@ -3363,7 +3583,13 @@ func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -3531,14 +3757,25 @@ func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3812,14 +4049,25 @@ func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3994,21 +4242,43 @@ func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"height\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"height\":"
out.RawString(prefix)
- out.Uint64(x.Height)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -4041,7 +4311,13 @@ func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -4053,7 +4329,15 @@ func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "height":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.Height = f
}
}
@@ -4189,14 +4473,25 @@ func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4470,14 +4765,25 @@ func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4632,21 +4938,36 @@ func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
{
- const prefix string = ",\"ignoreErrors\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ignoreErrors\":"
out.RawString(prefix)
out.Bool(x.IgnoreErrors)
}
@@ -4684,7 +5005,13 @@ func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -4830,14 +5157,25 @@ func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4972,10 +5310,16 @@ func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"count\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"count\":"
+ out.RawString(prefix)
out.Uint32(x.Count)
}
out.RawByte('}')
@@ -5009,7 +5353,15 @@ func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "count":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Count = f
}
}
@@ -5145,14 +5497,25 @@ func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5307,21 +5670,36 @@ func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
{
- const prefix string = ",\"seal\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"seal\":"
out.RawString(prefix)
out.Bool(x.Seal)
}
@@ -5359,7 +5737,13 @@ func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -5505,14 +5889,25 @@ func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5786,14 +6181,25 @@ func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5948,14 +6354,25 @@ func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"concurrency\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"concurrency\":"
+ out.RawString(prefix)
out.Uint32(x.Concurrency)
}
{
- const prefix string = ",\"removeDuplicates\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"removeDuplicates\":"
out.RawString(prefix)
out.Bool(x.RemoveDuplicates)
}
@@ -5990,7 +6407,15 @@ func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "concurrency":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Concurrency = f
}
case "removeDuplicates":
@@ -6132,14 +6557,25 @@ func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6413,14 +6849,25 @@ func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6511,9 +6958,12 @@ func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool {
}
type StartShardEvacuationRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- IgnoreErrors bool `json:"ignoreErrors"`
- Scope uint32 `json:"scope"`
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Scope uint32 `json:"scope"`
+ ContainerWorkerCount uint32 `json:"containerWorkerCount"`
+ ObjectWorkerCount uint32 `json:"objectWorkerCount"`
+ RepOneOnly bool `json:"repOneOnly"`
}
var (
@@ -6533,6 +6983,9 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
size += proto.UInt32Size(3, x.Scope)
+ size += proto.UInt32Size(4, x.ContainerWorkerCount)
+ size += proto.UInt32Size(5, x.ObjectWorkerCount)
+ size += proto.BoolSize(6, x.RepOneOnly)
return size
}
@@ -6558,6 +7011,15 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar
if x.Scope != 0 {
mm.AppendUint32(3, x.Scope)
}
+ if x.ContainerWorkerCount != 0 {
+ mm.AppendUint32(4, x.ContainerWorkerCount)
+ }
+ if x.ObjectWorkerCount != 0 {
+ mm.AppendUint32(5, x.ObjectWorkerCount)
+ }
+ if x.RepOneOnly {
+ mm.AppendBool(6, x.RepOneOnly)
+ }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -6587,6 +7049,24 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er
return fmt.Errorf("cannot unmarshal field %s", "Scope")
}
x.Scope = data
+ case 4: // ContainerWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount")
+ }
+ x.ContainerWorkerCount = data
+ case 5: // ObjectWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
+ }
+ x.ObjectWorkerCount = data
+ case 6: // RepOneOnly
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly")
+ }
+ x.RepOneOnly = data
}
}
return nil
@@ -6618,6 +7098,33 @@ func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) {
x.Scope = v
}
+func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 {
+ if x != nil {
+ return x.ContainerWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) {
+ x.ContainerWorkerCount = v
+}
+func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
+ if x != nil {
+ return x.ObjectWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
+ x.ObjectWorkerCount = v
+}
+func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool {
+ if x != nil {
+ return x.RepOneOnly
+ }
+ return false
+}
+func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) {
+ x.RepOneOnly = v
+}
// MarshalJSON implements the json.Marshaler interface.
func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
@@ -6630,29 +7137,79 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
{
- const prefix string = ",\"ignoreErrors\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ignoreErrors\":"
out.RawString(prefix)
out.Bool(x.IgnoreErrors)
}
{
- const prefix string = ",\"scope\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"scope\":"
out.RawString(prefix)
out.Uint32(x.Scope)
}
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ContainerWorkerCount)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"objectWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ObjectWorkerCount)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"repOneOnly\":"
+ out.RawString(prefix)
+ out.Bool(x.RepOneOnly)
+ }
out.RawByte('}')
}
@@ -6687,7 +7244,13 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -6703,9 +7266,51 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "scope":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Scope = f
}
+ case "containerWorkerCount":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.ContainerWorkerCount = f
+ }
+ case "objectWorkerCount":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.ObjectWorkerCount = f
+ }
+ case "repOneOnly":
+ {
+ var f bool
+ f = in.Bool()
+ x.RepOneOnly = f
+ }
}
in.WantComma()
}
@@ -6839,14 +7444,25 @@ func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7120,14 +7736,25 @@ func (x *StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7401,14 +8028,25 @@ func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7578,11 +8216,19 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(ou
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"value\":"
- out.RawString(prefix[1:])
- out.Int64(x.Value)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"value\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Value, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -7615,7 +8261,15 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON(
case "value":
{
var f int64
- f = in.Int64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := int64(v)
+ f = pv
x.Value = f
}
}
@@ -7707,11 +8361,19 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jw
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"seconds\":"
- out.RawString(prefix[1:])
- out.Int64(x.Seconds)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"seconds\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Seconds, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -7744,7 +8406,15 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *j
case "seconds":
{
var f int64
- f = in.Int64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := int64(v)
+ f = pv
x.Seconds = f
}
}
@@ -8062,73 +8732,157 @@ func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Wri
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"totalObjects\":"
- out.RawString(prefix[1:])
- out.Uint64(x.TotalObjects)
- }
- {
- const prefix string = ",\"evacuatedObjects\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"totalObjects\":"
out.RawString(prefix)
- out.Uint64(x.EvacuatedObjects)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalObjects, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"failedObjects\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"evacuatedObjects\":"
out.RawString(prefix)
- out.Uint64(x.FailedObjects)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedObjects, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"shardID\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"failedObjects\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedObjects, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
{
- const prefix string = ",\"status\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"status\":"
out.RawString(prefix)
- out.Int32(int32(x.Status))
+ v := int32(x.Status)
+ if vv, ok := GetShardEvacuationStatusResponse_Body_Status_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
{
- const prefix string = ",\"duration\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"duration\":"
out.RawString(prefix)
x.Duration.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"startedAt\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"startedAt\":"
out.RawString(prefix)
x.StartedAt.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"errorMessage\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"errorMessage\":"
out.RawString(prefix)
out.String(x.ErrorMessage)
}
{
- const prefix string = ",\"skippedObjects\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"skippedObjects\":"
out.RawString(prefix)
- out.Uint64(x.SkippedObjects)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.SkippedObjects, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"totalTrees\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"totalTrees\":"
out.RawString(prefix)
- out.Uint64(x.TotalTrees)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalTrees, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"evacuatedTrees\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"evacuatedTrees\":"
out.RawString(prefix)
- out.Uint64(x.EvacuatedTrees)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedTrees, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"failedTrees\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"failedTrees\":"
out.RawString(prefix)
- out.Uint64(x.FailedTrees)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedTrees, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -8161,19 +8915,43 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex
case "totalObjects":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.TotalObjects = f
}
case "evacuatedObjects":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.EvacuatedObjects = f
}
case "failedObjects":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.FailedObjects = f
}
case "shardID":
@@ -8182,7 +8960,13 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -8234,25 +9018,57 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex
case "skippedObjects":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.SkippedObjects = f
}
case "totalTrees":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.TotalTrees = f
}
case "evacuatedTrees":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.EvacuatedTrees = f
}
case "failedTrees":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.FailedTrees = f
}
}
@@ -8388,14 +9204,25 @@ func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -8669,14 +9496,25 @@ func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -8950,14 +9788,25 @@ func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -9231,14 +10080,25 @@ func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -9512,14 +10372,25 @@ func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -9677,16 +10548,31 @@ func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"target\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
x.Target.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"chain\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chain\":"
out.RawString(prefix)
- out.Base64Bytes(x.Chain)
+ if x.Chain != nil {
+ out.Base64Bytes(x.Chain)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -9726,7 +10612,13 @@ func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chain":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Chain = f
}
}
@@ -9862,14 +10754,25 @@ func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10004,11 +10907,21 @@ func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"chainId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ChainId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainId\":"
+ out.RawString(prefix)
+ if x.ChainId != nil {
+ out.Base64Bytes(x.ChainId)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -10041,7 +10954,13 @@ func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chainId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ChainId = f
}
}
@@ -10177,14 +11096,25 @@ func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10342,16 +11272,31 @@ func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"target\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
x.Target.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"chainId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainId\":"
out.RawString(prefix)
- out.Base64Bytes(x.ChainId)
+ if x.ChainId != nil {
+ out.Base64Bytes(x.ChainId)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -10391,7 +11336,13 @@ func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chainId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ChainId = f
}
}
@@ -10527,14 +11478,25 @@ func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10669,11 +11631,21 @@ func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"chain\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Chain)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chain\":"
+ out.RawString(prefix)
+ if x.Chain != nil {
+ out.Base64Bytes(x.Chain)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -10706,7 +11678,13 @@ func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chain":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Chain = f
}
}
@@ -10842,14 +11820,25 @@ func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10987,10 +11976,16 @@ func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Write
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"target\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
x.Target.MarshalEasyJSON(out)
}
out.RawByte('}')
@@ -11161,14 +12156,25 @@ func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -11303,16 +12309,26 @@ func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writ
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"chains\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chains\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Chains {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Chains[i])
+ if x.Chains[i] != nil {
+ out.Base64Bytes(x.Chains[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
@@ -11350,7 +12366,13 @@ func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexe
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -11490,14 +12512,25 @@ func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -11632,10 +12665,16 @@ func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Wri
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"chainName\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainName\":"
+ out.RawString(prefix)
out.String(x.ChainName)
}
out.RawByte('}')
@@ -11805,14 +12844,25 @@ func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -11953,10 +13003,16 @@ func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Wr
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"targets\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"targets\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Targets {
if i != 0 {
@@ -12141,14 +13197,25 @@ func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -12306,16 +13373,31 @@ func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writ
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"target\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
x.Target.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"chainId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainId\":"
out.RawString(prefix)
- out.Base64Bytes(x.ChainId)
+ if x.ChainId != nil {
+ out.Base64Bytes(x.ChainId)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -12355,7 +13437,13 @@ func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexe
case "chainId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ChainId = f
}
}
@@ -12491,14 +13579,25 @@ func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -12772,14 +13871,25 @@ func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -12917,10 +14027,16 @@ func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwr
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"target\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
x.Target.MarshalEasyJSON(out)
}
out.RawByte('}')
@@ -13091,14 +14207,25 @@ func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter.
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -13372,14 +14499,25 @@ func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -13594,36 +14732,66 @@ func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
{
- const prefix string = ",\"ignoreErrors\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ignoreErrors\":"
out.RawString(prefix)
out.Bool(x.IgnoreErrors)
}
{
- const prefix string = ",\"async\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"async\":"
out.RawString(prefix)
out.Bool(x.Async)
}
{
- const prefix string = ",\"restoreMode\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"restoreMode\":"
out.RawString(prefix)
out.Bool(x.RestoreMode)
}
{
- const prefix string = ",\"shrink\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shrink\":"
out.RawString(prefix)
out.Bool(x.Shrink)
}
@@ -13661,7 +14829,13 @@ func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -13825,14 +14999,25 @@ func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -14007,19 +15192,39 @@ func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Shard_ID)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ if x.Shard_ID != nil {
+ out.Base64Bytes(x.Shard_ID)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"success\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"success\":"
out.RawString(prefix)
out.Bool(x.Success)
}
{
- const prefix string = ",\"error\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"error\":"
out.RawString(prefix)
out.String(x.Error)
}
@@ -14054,7 +15259,13 @@ func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer)
case "shardID":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Shard_ID = f
}
case "success":
@@ -14164,10 +15375,16 @@ func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"results\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"results\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Results {
if i != 0 {
@@ -14352,14 +15569,25 @@ func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -14494,16 +15722,26 @@ func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
@@ -14541,7 +15779,13 @@ func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -14681,14 +15925,25 @@ func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -14962,14 +16217,25 @@ func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -15144,26 +16410,46 @@ func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- out.Base64Bytes(x.Shard_ID[i])
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
}
out.RawByte(']')
}
{
- const prefix string = ",\"targetFillPercent\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"targetFillPercent\":"
out.RawString(prefix)
out.Uint32(x.TargetFillPercent)
}
{
- const prefix string = ",\"concurrencyLimit\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"concurrencyLimit\":"
out.RawString(prefix)
out.Uint32(x.ConcurrencyLimit)
}
@@ -15201,7 +16487,13 @@ func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
list = append(list, f)
in.WantComma()
}
@@ -15211,13 +16503,29 @@ func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "targetFillPercent":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.TargetFillPercent = f
}
case "concurrencyLimit":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.ConcurrencyLimit = f
}
}
@@ -15353,14 +16661,25 @@ func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -15535,19 +16854,39 @@ func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Wri
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Shard_ID)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ if x.Shard_ID != nil {
+ out.Base64Bytes(x.Shard_ID)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"success\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"success\":"
out.RawString(prefix)
out.Bool(x.Success)
}
{
- const prefix string = ",\"error\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"error\":"
out.RawString(prefix)
out.String(x.Error)
}
@@ -15582,7 +16921,13 @@ func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lex
case "shardID":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Shard_ID = f
}
case "success":
@@ -15692,10 +17037,16 @@ func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"results\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"results\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Results {
if i != 0 {
@@ -15880,14 +17231,25 @@ func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -15941,3 +17303,727 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
in.Consumed()
}
}
+
+type ListShardsForObjectRequest_Body struct {
+ ObjectId string `json:"objectId"`
+ ContainerId string `json:"containerId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.StringSize(1, x.ObjectId)
+ size += proto.StringSize(2, x.ContainerId)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ObjectId) != 0 {
+ mm.AppendString(1, x.ObjectId)
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendString(2, x.ContainerId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ObjectId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ObjectId")
+ }
+ x.ObjectId = data
+ case 2: // ContainerId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest_Body) GetObjectId() string {
+ if x != nil {
+ return x.ObjectId
+ }
+ return ""
+}
+func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) {
+ x.ObjectId = v
+}
+func (x *ListShardsForObjectRequest_Body) GetContainerId() string {
+ if x != nil {
+ return x.ContainerId
+ }
+ return ""
+}
+func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) {
+ x.ContainerId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"objectId\":"
+ out.RawString(prefix)
+ out.String(x.ObjectId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ out.String(x.ContainerId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "objectId":
+ {
+ var f string
+ f = in.String()
+ x.ObjectId = f
+ }
+ case "containerId":
+ {
+ var f string
+ f = in.String()
+ x.ContainerId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsForObjectRequest struct {
+ Body *ListShardsForObjectRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil)
+ _ json.Marshaler = (*ListShardsForObjectRequest)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListShardsForObjectRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsForObjectRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) {
+ x.Body = v
+}
+func (x *ListShardsForObjectRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsForObjectRequest_Body
+ f = new(ListShardsForObjectRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsForObjectResponse_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsForObjectResponse struct {
+ Body *ListShardsForObjectResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
+ _ json.Marshaler = (*ListShardsForObjectResponse)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListShardsForObjectResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsForObjectResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) {
+ x.Body = v
+}
+func (x *ListShardsForObjectResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsForObjectResponse_Body
+ f = new(ListShardsForObjectResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index f5cfefa85..045662ccf 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -26,7 +26,6 @@ const (
ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
- ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation"
ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus"
ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus"
@@ -42,6 +41,7 @@ const (
ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
+ ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject"
)
// ControlServiceClient is the client API for ControlService service.
@@ -62,10 +62,6 @@ type ControlServiceClient interface {
SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error)
- // EvacuateShard moves all data from one shard to the others.
- // Deprecated: Use
- // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
- EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -100,6 +96,8 @@ type ControlServiceClient interface {
DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
+ // ListShardsForObject returns info about the shards where the given object is stored.
+ ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error)
}
type controlServiceClient struct {
@@ -173,15 +171,6 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron
return out, nil
}
-func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
- out := new(EvacuateShardResponse)
- err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) {
out := new(StartShardEvacuationResponse)
err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...)
@@ -317,6 +306,15 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS
return out, nil
}
+func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) {
+ out := new(ListShardsForObjectResponse)
+ err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// ControlServiceServer is the server API for ControlService service.
// All implementations should embed UnimplementedControlServiceServer
// for forward compatibility
@@ -335,10 +333,6 @@ type ControlServiceServer interface {
SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error)
- // EvacuateShard moves all data from one shard to the others.
- // Deprecated: Use
- // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
- EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -373,6 +367,8 @@ type ControlServiceServer interface {
DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
+ // ListShardsForObject returns information about the shards where the object is stored.
+ ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -400,9 +396,6 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard
func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
}
-func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented")
-}
func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented")
}
@@ -448,6 +441,9 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh
func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
}
+func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented")
+}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -586,24 +582,6 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
-func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(EvacuateShardRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).EvacuateShard(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_EvacuateShard_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartShardEvacuationRequest)
if err := dec(in); err != nil {
@@ -874,6 +852,24 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
+func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListShardsForObjectRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).ListShardsForObject(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_ListShardsForObject_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -909,10 +905,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SynchronizeTree",
Handler: _ControlService_SynchronizeTree_Handler,
},
- {
- MethodName: "EvacuateShard",
- Handler: _ControlService_EvacuateShard_Handler,
- },
{
MethodName: "StartShardEvacuation",
Handler: _ControlService_StartShardEvacuation_Handler,
@@ -973,6 +965,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "StartShardRebuild",
Handler: _ControlService_StartShardRebuild_Handler,
},
+ {
+ MethodName: "ListShardsForObject",
+ Handler: _ControlService_ListShardsForObject_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index f92106589..69d87292d 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -234,16 +234,35 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"key\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Key)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
- out.Base64Bytes(x.Sign)
+ if x.Sign != nil {
+ out.Base64Bytes(x.Sign)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -276,13 +295,25 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Key = f
}
case "signature":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Sign = f
}
}
@@ -414,19 +445,35 @@ func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"key\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
out.String(x.Key)
}
{
- const prefix string = ",\"value\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"value\":"
out.RawString(prefix)
out.String(x.Value)
}
{
- const prefix string = ",\"parents\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parents\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Parents {
@@ -645,14 +692,29 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"publicKey\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.PublicKey)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"publicKey\":"
+ out.RawString(prefix)
+ if x.PublicKey != nil {
+ out.Base64Bytes(x.PublicKey)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"addresses\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"addresses\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Addresses {
@@ -664,7 +726,12 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"attributes\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"attributes\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Attributes {
@@ -676,9 +743,19 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"state\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"state\":"
out.RawString(prefix)
- out.Int32(int32(x.State))
+ v := int32(x.State)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
out.RawByte('}')
}
@@ -711,7 +788,13 @@ func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "publicKey":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.PublicKey = f
}
case "addresses":
@@ -878,14 +961,27 @@ func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"epoch\":"
- out.RawString(prefix[1:])
- out.Uint64(x.Epoch)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"epoch\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"nodes\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodes\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Nodes {
@@ -927,7 +1023,15 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "epoch":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.Epoch = f
}
case "nodes":
@@ -1179,19 +1283,39 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"shardID\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Shard_ID)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ if x.Shard_ID != nil {
+ out.Base64Bytes(x.Shard_ID)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"metabasePath\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"metabasePath\":"
out.RawString(prefix)
out.String(x.MetabasePath)
}
{
- const prefix string = ",\"blobstor\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"blobstor\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Blobstor {
@@ -1203,27 +1327,57 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"writecachePath\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"writecachePath\":"
out.RawString(prefix)
out.String(x.WritecachePath)
}
{
- const prefix string = ",\"mode\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"mode\":"
out.RawString(prefix)
- out.Int32(int32(x.Mode))
+ v := int32(x.Mode)
+ if vv, ok := ShardMode_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
{
- const prefix string = ",\"errorCount\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"errorCount\":"
out.RawString(prefix)
out.Uint32(x.ErrorCount)
}
{
- const prefix string = ",\"piloramaPath\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"piloramaPath\":"
out.RawString(prefix)
out.String(x.PiloramaPath)
}
{
- const prefix string = ",\"evacuationInProgress\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"evacuationInProgress\":"
out.RawString(prefix)
out.Bool(x.EvacuationInProgress)
}
@@ -1258,7 +1412,13 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "shardID":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Shard_ID = f
}
case "metabasePath":
@@ -1312,7 +1472,15 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "errorCount":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.ErrorCount = f
}
case "piloramaPath":
@@ -1436,14 +1604,25 @@ func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"path\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"path\":"
+ out.RawString(prefix)
out.String(x.Path)
}
{
- const prefix string = ",\"type\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"type\":"
out.RawString(prefix)
out.String(x.Type)
}
@@ -1637,14 +1816,30 @@ func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"type\":"
- out.RawString(prefix[1:])
- out.Int32(int32(x.Type))
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"type\":"
+ out.RawString(prefix)
+ v := int32(x.Type)
+ if vv, ok := ChainTarget_TargetType_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
{
- const prefix string = ",\"Name\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"Name\":"
out.RawString(prefix)
out.String(x.Name)
}
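Taken together, the regenerated marshalers above change three JSON conventions: nil byte slices are emitted as "" instead of being passed to Base64Bytes, enum fields are written by name when the value is present in the corresponding *_name map (with a numeric fallback), and 64-bit integers are written as quoted strings, mirroring protojson; the matching unmarshalers accept the quoted form via JsonNumber. A small illustrative sketch of the resulting shape (the output in the comment is inferred from the code above, not a captured run):

    package main

    import (
        "fmt"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
        jwriter "github.com/mailru/easyjson/jwriter"
    )

    func main() {
        m := &control.Netmap{Epoch: 42}

        var w jwriter.Writer
        m.MarshalEasyJSON(&w)

        data, _ := w.BuildBytes()
        fmt.Println(string(data)) // {"epoch":"42","nodes":[]}
    }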
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 496b07a98..1b92fdaad 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -5,10 +5,11 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
versionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
@@ -26,9 +27,9 @@ type executorSvc struct {
// NodeState encapsulates information
// about current node state.
type NodeState interface {
- // Must return current node state
+ // LocalNodeInfo must return current node state
// in FrostFS API v2 NodeInfo structure.
- LocalNodeInfo() (*netmap.NodeInfo, error)
+ LocalNodeInfo() *netmapSDK.NodeInfo
// ReadCurrentNetMap reads current local network map of the storage node
// into the given parameter. Returns any error encountered which prevented
@@ -39,17 +40,19 @@ type NodeState interface {
// NetworkInfo encapsulates source of the
// recent information about the FrostFS network.
type NetworkInfo interface {
- // Must return recent network information in FrostFS API v2 NetworkInfo structure.
+ // Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
- Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+ Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
}
func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
- if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil {
- // this should never happen, otherwise it programmers bug
- panic("can't create netmap execution service")
- }
+ // This should never happen; otherwise it's a programmer's bug.
+ msg := "BUG: can't create netmap execution service"
+ assert.False(s == nil, msg, "node state is nil")
+ assert.False(netInfo == nil, msg, "network info is nil")
+ assert.False(respSvc == nil, msg, "response service is nil")
+ assert.True(version.IsValid(v), msg, "invalid version")
res := &executorSvc{
state: s,
@@ -64,39 +67,15 @@ func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo,
func (s *executorSvc) LocalNodeInfo(
_ context.Context,
- req *netmap.LocalNodeInfoRequest,
+ _ *netmap.LocalNodeInfoRequest,
) (*netmap.LocalNodeInfoResponse, error) {
- verV2 := req.GetMetaHeader().GetVersion()
- if verV2 == nil {
- return nil, errors.New("missing version")
- }
-
- var ver versionsdk.Version
- if err := ver.ReadFromV2(*verV2); err != nil {
- return nil, fmt.Errorf("can't read version: %w", err)
- }
-
- ni, err := s.state.LocalNodeInfo()
- if err != nil {
- return nil, err
- }
-
- if addrNum := ni.NumberOfAddresses(); addrNum > 0 && ver.Minor() <= 7 {
- ni2 := new(netmap.NodeInfo)
- ni2.SetPublicKey(ni.GetPublicKey())
- ni2.SetState(ni.GetState())
- ni2.SetAttributes(ni.GetAttributes())
- ni.IterateAddresses(func(s string) bool {
- ni2.SetAddresses(s)
- return true
- })
-
- ni = ni2
- }
+ ni := s.state.LocalNodeInfo()
+ var nodeInfo netmap.NodeInfo
+ ni.WriteToV2(&nodeInfo)
body := new(netmap.LocalNodeInfoResponseBody)
body.SetVersion(&s.version)
- body.SetNodeInfo(ni)
+ body.SetNodeInfo(&nodeInfo)
resp := new(netmap.LocalNodeInfoResponse)
resp.SetBody(body)
@@ -106,7 +85,7 @@ func (s *executorSvc) LocalNodeInfo(
}
func (s *executorSvc) NetworkInfo(
- _ context.Context,
+ ctx context.Context,
req *netmap.NetworkInfoRequest,
) (*netmap.NetworkInfoResponse, error) {
verV2 := req.GetMetaHeader().GetVersion()
@@ -119,7 +98,7 @@ func (s *executorSvc) NetworkInfo(
return nil, fmt.Errorf("can't read version: %w", err)
}
- ni, err := s.netInfo.Dump(ver)
+ ni, err := s.netInfo.Dump(ctx, ver)
if err != nil {
return nil, err
}
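NewExecutionService now fails fast through internal/assert instead of a single opaque panic, so each broken precondition reports which dependency was nil. The assert package itself is not part of this section of the diff; a plausible minimal implementation consistent with the calls above would be:

    package assert

    import "strings"

    // True panics with the joined details when cond is false.
    func True(cond bool, details ...string) {
        if !cond {
            panic(strings.Join(details, " "))
        }
    }

    // False panics with the joined details when cond is true.
    func False(cond bool, details ...string) {
        True(!cond, details...)
    }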
diff --git a/pkg/services/netmap/server.go b/pkg/services/netmap/server.go
index 0a09c9f44..eff880dbe 100644
--- a/pkg/services/netmap/server.go
+++ b/pkg/services/netmap/server.go
@@ -3,7 +3,7 @@ package netmap
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
)
// Server is an interface of the FrostFS API Netmap service server.
diff --git a/pkg/services/netmap/sign.go b/pkg/services/netmap/sign.go
index 9a16ad8f1..5f184d5c0 100644
--- a/pkg/services/netmap/sign.go
+++ b/pkg/services/netmap/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
)
type signService struct {
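The import swaps in this file and its neighbors follow one mechanical rule: packages that used to live in the frostfs-api-go/v2 module are now imported from the api subtree of frostfs-sdk-go, with the package identifiers unchanged. Illustrative before/after:

    import (
        // before: "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
        "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
    )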
diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go
deleted file mode 100644
index 921545c8b..000000000
--- a/pkg/services/object/acl/acl.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package acl
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "errors"
- "fmt"
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2"
- v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
- bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// Checker implements v2.ACLChecker interfaces and provides
-// ACL/eACL validation functionality.
-type Checker struct {
- eaclSrc container.EACLSource
- validator *eaclSDK.Validator
- localStorage *engine.StorageEngine
- state netmap.State
-}
-
-type localStorage struct {
- ls *engine.StorageEngine
-}
-
-func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
- if s.ls == nil {
- return nil, io.ErrUnexpectedEOF
- }
-
- return engine.Head(ctx, s.ls, addr)
-}
-
-// Various EACL check errors.
-var (
- errEACLDeniedByRule = errors.New("denied by rule")
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
-
-// NewChecker creates Checker.
-// Panics if at least one of the parameter is nil.
-func NewChecker(
- state netmap.State,
- eaclSrc container.EACLSource,
- validator *eaclSDK.Validator,
- localStorage *engine.StorageEngine,
-) *Checker {
- return &Checker{
- eaclSrc: eaclSrc,
- validator: validator,
- localStorage: localStorage,
- state: state,
- }
-}
-
-// CheckBasicACL is a main check function for basic ACL.
-func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool {
- // check basic ACL permissions
- return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole())
-}
-
-// StickyBitCheck validates owner field in the request if sticky bit is enabled.
-func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool {
- // According to FrostFS specification sticky bit has no effect on system nodes
- // for correct intra-container work with objects (in particular, replication).
- if info.RequestRole() == acl.RoleContainer {
- return true
- }
-
- if !info.BasicACL().Sticky() {
- return true
- }
-
- if len(info.SenderKey()) == 0 {
- return false
- }
-
- requestSenderKey := unmarshalPublicKey(info.SenderKey())
-
- return isOwnerFromKey(owner, requestSenderKey)
-}
-
-// CheckEACL is a main check function for extended ACL.
-func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
- basicACL := reqInfo.BasicACL()
- if !basicACL.Extendable() {
- return nil
- }
-
- bearerTok := reqInfo.Bearer()
- impersonate := bearerTok != nil && bearerTok.Impersonate()
-
- // if bearer token is not allowed, then ignore it
- if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) {
- reqInfo.CleanBearer()
- }
-
- var table eaclSDK.Table
- cnr := reqInfo.ContainerID()
-
- if bearerTok == nil {
- eaclInfo, err := c.eaclSrc.GetEACL(cnr)
- if err != nil {
- if client.IsErrEACLNotFound(err) {
- return nil
- }
- return err
- }
-
- table = *eaclInfo.Value
- } else {
- table = bearerTok.EACLTable()
- }
-
- // if bearer token is not present, isValidBearer returns true
- if err := isValidBearer(reqInfo, c.state); err != nil {
- return err
- }
-
- hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo)
- if err != nil {
- return err
- }
-
- eaclRole := getRole(reqInfo)
-
- action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
- WithRole(eaclRole).
- WithOperation(eaclSDK.Operation(reqInfo.Operation())).
- WithContainerID(&cnr).
- WithSenderKey(reqInfo.SenderKey()).
- WithHeaderSource(hdrSrc).
- WithEACLTable(&table),
- )
-
- if action != eaclSDK.ActionAllow {
- return errEACLDeniedByRule
- }
- return nil
-}
-
-func getRole(reqInfo v2.RequestInfo) eaclSDK.Role {
- var eaclRole eaclSDK.Role
- switch op := reqInfo.RequestRole(); op {
- default:
- eaclRole = eaclSDK.Role(op)
- case acl.RoleOwner:
- eaclRole = eaclSDK.RoleUser
- case acl.RoleInnerRing, acl.RoleContainer:
- eaclRole = eaclSDK.RoleSystem
- case acl.RoleOthers:
- eaclRole = eaclSDK.RoleOthers
- }
- return eaclRole
-}
-
-func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) {
- var xHeaderSource eaclV2.XHeaderSource
- if req, ok := msg.(eaclV2.Request); ok {
- xHeaderSource = eaclV2.NewRequestXHeaderSource(req)
- } else {
- xHeaderSource = eaclV2.NewResponseXHeaderSource(msg.(eaclV2.Response), reqInfo.Request().(eaclV2.Request))
- }
-
- hdrSrc, err := eaclV2.NewMessageHeaderSource(&localStorage{ls: c.localStorage}, xHeaderSource, cnr, eaclV2.WithOID(reqInfo.ObjectID()))
- if err != nil {
- return nil, fmt.Errorf("can't parse headers: %w", err)
- }
- return hdrSrc, nil
-}
-
-// isValidBearer checks whether bearer token was correctly signed by authorized
-// entity. This method might be defined on whole ACL service because it will
-// require fetching current epoch to check lifetime.
-func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error {
- ownerCnr := reqInfo.ContainerOwner()
-
- token := reqInfo.Bearer()
-
- // 0. Check if bearer token is present in reqInfo.
- if token == nil {
- return nil
- }
-
- // 1. First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // 2. Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // 3. Then check if container is either empty or equal to the container in the request.
- cnr, isSet := token.EACLTable().CID()
- if isSet && !cnr.Equals(reqInfo.ContainerID()) {
- return errBearerInvalidContainerID
- }
-
- // 4. Then check if container owner signed this token.
- if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) {
- // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
- return errBearerNotSignedByOwner
- }
-
- // 5. Then check if request sender has rights to use this token.
- var keySender frostfsecdsa.PublicKey
-
- err := keySender.Decode(reqInfo.SenderKey())
- if err != nil {
- return fmt.Errorf("decode sender public key: %w", err)
- }
-
- var usrSender user.ID
- user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender))
-
- if !token.AssertUser(usrSender) {
- // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
- return errBearerInvalidOwner
- }
-
- return nil
-}
-
-func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
- if key == nil {
- return false
- }
-
- var id2 user.ID
- user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
-
- return id.Equals(id2)
-}
-
-func unmarshalPublicKey(bs []byte) *keys.PublicKey {
- pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256())
- if err != nil {
- return nil
- }
- return pub
-}
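For reference, the sticky-bit rule enforced by the deleted Checker can be restated as a standalone predicate (a distillation of the removed StickyBitCheck, not code that remains in the tree):

    import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"

    // stickyBitCheck restates the removed logic: the container (system) role
    // bypasses the sticky bit, as does a basic ACL without the bit set; with
    // the bit set, a sender key must be present and must resolve to the
    // container owner's user ID (sameOwner).
    func stickyBitCheck(role acl.Role, basic acl.Basic, senderKey []byte, sameOwner bool) bool {
        if role == acl.RoleContainer || !basic.Sticky() {
            return true
        }
        if len(senderKey) == 0 {
            return false
        }
        return sameOwner
    }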
diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go
deleted file mode 100644
index d63cb1285..000000000
--- a/pkg/services/object/acl/acl_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package acl
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "github.com/stretchr/testify/require"
-)
-
-type emptyEACLSource struct{}
-
-func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) {
- return nil, nil
-}
-
-type emptyNetmapState struct{}
-
-func (e emptyNetmapState) CurrentEpoch() uint64 {
- return 0
-}
-
-func TestStickyCheck(t *testing.T) {
- checker := NewChecker(
- emptyNetmapState{},
- emptyEACLSource{},
- eaclSDK.NewValidator(),
- &engine.StorageEngine{})
-
- t.Run("system role", func(t *testing.T) {
- var info v2.RequestInfo
-
- info.SetSenderKey(make([]byte, 33)) // any non-empty key
- info.SetRequestRole(acl.RoleContainer)
-
- require.True(t, checker.StickyBitCheck(info, usertest.ID()))
-
- var basicACL acl.Basic
- basicACL.MakeSticky()
-
- info.SetBasicACL(basicACL)
-
- require.True(t, checker.StickyBitCheck(info, usertest.ID()))
- })
-
- t.Run("owner ID and/or public key emptiness", func(t *testing.T) {
- var info v2.RequestInfo
-
- info.SetRequestRole(acl.RoleOthers) // should be non-system role
-
- assertFn := func(isSticky, withKey, withOwner, expected bool) {
- info := info
- if isSticky {
- var basicACL acl.Basic
- basicACL.MakeSticky()
-
- info.SetBasicACL(basicACL)
- }
-
- if withKey {
- info.SetSenderKey(make([]byte, 33))
- } else {
- info.SetSenderKey(nil)
- }
-
- var ownerID user.ID
-
- if withOwner {
- ownerID = usertest.ID()
- }
-
- require.Equal(t, expected, checker.StickyBitCheck(info, ownerID))
- }
-
- assertFn(true, false, false, false)
- assertFn(true, true, false, false)
- assertFn(true, false, true, false)
- assertFn(false, false, false, true)
- assertFn(false, true, false, true)
- assertFn(false, false, true, true)
- assertFn(false, true, true, true)
- })
-}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
deleted file mode 100644
index 023b99239..000000000
--- a/pkg/services/object/acl/eacl/v2/eacl_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package v2
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
- "testing"
-
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-type testLocalStorage struct {
- t *testing.T
-
- expAddr oid.Address
-
- obj *objectSDK.Object
-
- err error
-}
-
-func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
- require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
- require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
-
- return s.obj, s.err
-}
-
-func testXHeaders(strs ...string) []session.XHeader {
- res := make([]session.XHeader, len(strs)/2)
-
- for i := 0; i < len(strs); i += 2 {
- res[i/2].SetKey(strs[i])
- res[i/2].SetValue(strs[i+1])
- }
-
- return res
-}
-
-func TestHeadRequest(t *testing.T) {
- req := new(objectV2.HeadRequest)
-
- meta := new(session.RequestMetaHeader)
- req.SetMetaHeader(meta)
-
- body := new(objectV2.HeadRequestBody)
- req.SetBody(body)
-
- addr := oidtest.Address()
-
- var addrV2 refs.Address
- addr.WriteToV2(&addrV2)
-
- body.SetAddress(&addrV2)
-
- xKey := "x-key"
- xVal := "x-val"
- xHdrs := testXHeaders(
- xKey, xVal,
- )
-
- meta.SetXHeaders(xHdrs)
-
- obj := objectSDK.New()
-
- attrKey := "attr_key"
- attrVal := "attr_val"
- var attr objectSDK.Attribute
- attr.SetKey(attrKey)
- attr.SetValue(attrVal)
- obj.SetAttributes(attr)
-
- table := new(eaclSDK.Table)
-
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
- senderKey := priv.PublicKey()
-
- r := eaclSDK.NewRecord()
- r.SetOperation(eaclSDK.OperationHead)
- r.SetAction(eaclSDK.ActionDeny)
- r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
- r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
- eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
-
- table.AddRecord(r)
-
- lStorage := &testLocalStorage{
- t: t,
- expAddr: addr,
- obj: obj,
- }
-
- id := addr.Object()
-
- newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
- hdrSrc, err := NewMessageHeaderSource(
- lStorage,
- NewRequestXHeaderSource(req),
- addr.Container(),
- WithOID(&id))
- require.NoError(t, err)
- return hdrSrc
- }
-
- cnr := addr.Container()
-
- unit := new(eaclSDK.ValidationUnit).
- WithContainerID(&cnr).
- WithOperation(eaclSDK.OperationHead).
- WithSenderKey(senderKey.Bytes()).
- WithEACLTable(table)
-
- validator := eaclSDK.NewValidator()
-
- checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
-
- meta.SetXHeaders(nil)
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- meta.SetXHeaders(xHdrs)
-
- obj.SetAttributes()
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- lStorage.err = errors.New("any error")
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- r.SetAction(eaclSDK.ActionAllow)
-
- rID := eaclSDK.NewRecord()
- rID.SetOperation(eaclSDK.OperationHead)
- rID.SetAction(eaclSDK.ActionDeny)
- rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
- eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
-
- table = eaclSDK.NewTable()
- table.AddRecord(r)
- table.AddRecord(rID)
-
- unit.WithEACLTable(table)
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-}
-
-func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
- actual, fromRule := v.CalculateAction(u)
- require.True(t, fromRule)
- require.Equal(t, expected, actual)
-}
-
-func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
- actual, fromRule := v.CalculateAction(u)
- require.False(t, fromRule)
- require.Equal(t, eaclSDK.ActionAllow, actual)
-}
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
deleted file mode 100644
index 34975e1e6..000000000
--- a/pkg/services/object/acl/eacl/v2/headers.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package v2
-
-import (
- "context"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-type Option func(*cfg)
-
-type cfg struct {
- storage ObjectStorage
-
- msg XHeaderSource
-
- cnr cid.ID
- obj *oid.ID
-}
-
-type ObjectStorage interface {
- Head(context.Context, oid.Address) (*objectSDK.Object, error)
-}
-
-type Request interface {
- GetMetaHeader() *session.RequestMetaHeader
-}
-
-type Response interface {
- GetMetaHeader() *session.ResponseMetaHeader
-}
-
-type headerSource struct {
- requestHeaders []eaclSDK.Header
- objectHeaders []eaclSDK.Header
-
- incompleteObjectHeaders bool
-}
-
-func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) {
- cfg := &cfg{
- storage: os,
- cnr: cnrID,
- msg: xhs,
- }
-
- for i := range opts {
- opts[i](cfg)
- }
-
- if cfg.msg == nil {
- return nil, errors.New("message is not provided")
- }
-
- var res headerSource
-
- err := cfg.readObjectHeaders(&res)
- if err != nil {
- return nil, err
- }
-
- res.requestHeaders = cfg.msg.GetXHeaders()
-
- return res, nil
-}
-
-func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
- switch typ {
- default:
- return nil, true
- case eaclSDK.HeaderFromRequest:
- return h.requestHeaders, true
- case eaclSDK.HeaderFromObject:
- return h.objectHeaders, !h.incompleteObjectHeaders
- }
-}
-
-type xHeader session.XHeader
-
-func (x xHeader) Key() string {
- return (*session.XHeader)(&x).GetKey()
-}
-
-func (x xHeader) Value() string {
- return (*session.XHeader)(&x).GetValue()
-}
-
-var errMissingOID = errors.New("object ID is missing")
-
-func (h *cfg) readObjectHeaders(dst *headerSource) error {
- switch m := h.msg.(type) {
- default:
- panic(fmt.Sprintf("unexpected message type %T", h.msg))
- case requestXHeaderSource:
- return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
- case responseXHeaderSource:
- return h.readObjectHeadersResponseXHeaderSource(m, dst)
- }
-}
-
-func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
- switch req := m.req.(type) {
- case
- *objectV2.GetRequest,
- *objectV2.HeadRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
-
- dst.objectHeaders = objHeaders
- dst.incompleteObjectHeaders = !completed
- case
- *objectV2.GetRangeRequest,
- *objectV2.GetRangeHashRequest,
- *objectV2.DeleteRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- dst.objectHeaders = addressHeaders(h.cnr, h.obj)
- case *objectV2.PutRequest:
- if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
-
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.PutSingleRequest:
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj)
- case *objectV2.SearchRequest:
- cnrV2 := req.GetBody().GetContainerID()
- var cnr cid.ID
-
- if cnrV2 != nil {
- if err := cnr.ReadFromV2(*cnrV2); err != nil {
- return fmt.Errorf("can't parse container ID: %w", err)
- }
- }
-
- dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
- }
- return nil
-}
-
-func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
- switch resp := m.resp.(type) {
- default:
- objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
-
- dst.objectHeaders = objectHeaders
- dst.incompleteObjectHeaders = !completed
- case *objectV2.GetResponse:
- if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
-
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.HeadResponse:
- oV2 := new(objectV2.Object)
-
- var hdr *objectV2.Header
-
- switch v := resp.GetBody().GetHeaderPart().(type) {
- case *objectV2.ShortHeader:
- hdr = new(objectV2.Header)
-
- var idV2 refsV2.ContainerID
- h.cnr.WriteToV2(&idV2)
-
- hdr.SetContainerID(&idV2)
- hdr.SetVersion(v.GetVersion())
- hdr.SetCreationEpoch(v.GetCreationEpoch())
- hdr.SetOwnerID(v.GetOwnerID())
- hdr.SetObjectType(v.GetObjectType())
- hdr.SetPayloadLength(v.GetPayloadLength())
- case *objectV2.HeaderWithSignature:
- hdr = v.GetHeader()
- }
-
- oV2.SetHeader(hdr)
-
- dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
- }
- return nil
-}
-
-func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
- if idObj != nil {
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(*idObj)
-
- obj, err := h.storage.Head(context.TODO(), addr)
- if err == nil {
- return headersFromObject(obj, cnr, idObj), true
- }
- }
-
- return addressHeaders(cnr, idObj), false
-}
-
-func cidHeader(idCnr cid.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectContainerID,
- v: idCnr.EncodeToString(),
- }
-}
-
-func oidHeader(obj oid.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectID,
- v: obj.EncodeToString(),
- }
-}
-
-func ownerIDHeader(ownerID user.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectOwnerID,
- v: ownerID.EncodeToString(),
- }
-}
-
-func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
- hh := make([]eaclSDK.Header, 0, 2)
- hh = append(hh, cidHeader(cnr))
-
- if oid != nil {
- hh = append(hh, oidHeader(*oid))
- }
-
- return hh
-}
diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go
deleted file mode 100644
index 72bd4c2d2..000000000
--- a/pkg/services/object/acl/eacl/v2/object.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package v2
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type sysObjHdr struct {
- k, v string
-}
-
-func (s sysObjHdr) Key() string {
- return s.k
-}
-
-func (s sysObjHdr) Value() string {
- return s.v
-}
-
-func u64Value(v uint64) string {
- return strconv.FormatUint(v, 10)
-}
-
-func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
- var count int
- for obj := obj; obj != nil; obj = obj.Parent() {
- count += 9 + len(obj.Attributes())
- }
-
- res := make([]eaclSDK.Header, 0, count)
- for ; obj != nil; obj = obj.Parent() {
- res = append(res,
- cidHeader(cnr),
- // creation epoch
- sysObjHdr{
- k: acl.FilterObjectCreationEpoch,
- v: u64Value(obj.CreationEpoch()),
- },
- // payload size
- sysObjHdr{
- k: acl.FilterObjectPayloadLength,
- v: u64Value(obj.PayloadSize()),
- },
- // object version
- sysObjHdr{
- k: acl.FilterObjectVersion,
- v: obj.Version().String(),
- },
- // object type
- sysObjHdr{
- k: acl.FilterObjectType,
- v: obj.Type().String(),
- },
- )
-
- if oid != nil {
- res = append(res, oidHeader(*oid))
- }
-
- if idOwner := obj.OwnerID(); !idOwner.IsEmpty() {
- res = append(res, ownerIDHeader(idOwner))
- }
-
- cs, ok := obj.PayloadChecksum()
- if ok {
- res = append(res, sysObjHdr{
- k: acl.FilterObjectPayloadHash,
- v: cs.String(),
- })
- }
-
- cs, ok = obj.PayloadHomomorphicHash()
- if ok {
- res = append(res, sysObjHdr{
- k: acl.FilterObjectHomomorphicHash,
- v: cs.String(),
- })
- }
-
- attrs := obj.Attributes()
- for i := range attrs {
- res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface
- }
- }
-
- return res
-}
diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go
deleted file mode 100644
index d91a21c75..000000000
--- a/pkg/services/object/acl/eacl/v2/opts.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package v2
-
-import (
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func WithOID(v *oid.ID) Option {
- return func(c *cfg) {
- c.obj = v
- }
-}
diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go
deleted file mode 100644
index c1fdea9d8..000000000
--- a/pkg/services/object/acl/eacl/v2/xheader.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-)
-
-type XHeaderSource interface {
- GetXHeaders() []eaclSDK.Header
-}
-
-type requestXHeaderSource struct {
- req Request
-}
-
-func NewRequestXHeaderSource(req Request) XHeaderSource {
- return requestXHeaderSource{req: req}
-}
-
-type responseXHeaderSource struct {
- resp Response
-
- req Request
-}
-
-func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource {
- return responseXHeaderSource{resp: resp, req: req}
-}
-
-func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
- ln := 0
-
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- ln += len(meta.GetXHeaders())
- }
-
- res := make([]eaclSDK.Header, 0, ln)
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- x := meta.GetXHeaders()
- for i := range x {
- res = append(res, (xHeader)(x[i]))
- }
- }
-
- return res
-}
-
-func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
- ln := 0
- xHdrs := make([][]session.XHeader, 0)
-
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- x := meta.GetXHeaders()
-
- ln += len(x)
-
- xHdrs = append(xHdrs, x)
- }
-
- res := make([]eaclSDK.Header, 0, ln)
-
- for i := range xHdrs {
- for j := range xHdrs[i] {
- res = append(res, xHeader(xHdrs[i][j]))
- }
- }
-
- return res
-}
diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go
deleted file mode 100644
index 11b9e6e5f..000000000
--- a/pkg/services/object/acl/v2/errors.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package v2
-
-import (
- "fmt"
-
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-const invalidRequestMessage = "malformed request"
-
-func malformedRequestError(reason string) error {
- return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
-}
-
-var (
- errEmptyBody = malformedRequestError("empty body")
- errEmptyVerificationHeader = malformedRequestError("empty verification header")
- errEmptyBodySig = malformedRequestError("empty at body signature")
- errInvalidSessionSig = malformedRequestError("invalid session token signature")
- errInvalidSessionOwner = malformedRequestError("invalid session token owner")
- errInvalidVerb = malformedRequestError("session token verb is invalid")
-)
-
-const (
- accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check"
- accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v"
-)
-
-func basicACLErr(info RequestInfo) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation))
-
- return errAccessDenied
-}
-
-func eACLErr(info RequestInfo, err error) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err))
-
- return errAccessDenied
-}
diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go
deleted file mode 100644
index 2d2b7bc8d..000000000
--- a/pkg/services/object/acl/v2/errors_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package v2
-
-import (
- "errors"
- "testing"
-
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "github.com/stretchr/testify/require"
-)
-
-func TestBasicACLErr(t *testing.T) {
- var reqInfo RequestInfo
- err := basicACLErr(reqInfo)
-
- var errAccessDenied *apistatus.ObjectAccessDenied
-
- require.ErrorAs(t, err, &errAccessDenied,
- "basicACLErr must be able to be casted to apistatus.ObjectAccessDenied")
-}
-
-func TestEACLErr(t *testing.T) {
- var reqInfo RequestInfo
- testErr := errors.New("test-eacl")
- err := eACLErr(reqInfo, testErr)
-
- var errAccessDenied *apistatus.ObjectAccessDenied
-
- require.ErrorAs(t, err, &errAccessDenied,
- "eACLErr must be able to be casted to apistatus.ObjectAccessDenied")
-}
diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go
deleted file mode 100644
index 15fcce884..000000000
--- a/pkg/services/object/acl/v2/opts.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// WithLogger returns option to set logger.
-func WithLogger(v *logger.Logger) Option {
- return func(c *cfg) {
- c.log = v
- }
-}
diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go
deleted file mode 100644
index 74279e453..000000000
--- a/pkg/services/object/acl/v2/request.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package v2
-
-import (
- "crypto/ecdsa"
- "fmt"
-
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// RequestInfo groups parsed version-independent (from SDK library)
-// request information and raw API request.
-type RequestInfo struct {
- basicACL acl.Basic
- requestRole acl.Role
- operation acl.Op // put, get, head, etc.
- cnrOwner user.ID // container owner
-
- // cnrNamespace defined to which namespace a container is belonged.
- cnrNamespace string
-
- idCnr cid.ID
-
- // optional for some request
- // e.g. Put, Search
- obj *oid.ID
-
- senderKey []byte
-
- bearer *bearer.Token // bearer token of request
-
- srcRequest any
-}
-
-func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
- r.basicACL = basicACL
-}
-
-func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
- r.requestRole = requestRole
-}
-
-func (r *RequestInfo) SetSenderKey(senderKey []byte) {
- r.senderKey = senderKey
-}
-
-// Request returns raw API request.
-func (r RequestInfo) Request() any {
- return r.srcRequest
-}
-
-// ContainerOwner returns owner if the container.
-func (r RequestInfo) ContainerOwner() user.ID {
- return r.cnrOwner
-}
-
-func (r RequestInfo) ContainerNamespace() string {
- return r.cnrNamespace
-}
-
-// ObjectID return object ID.
-func (r RequestInfo) ObjectID() *oid.ID {
- return r.obj
-}
-
-// ContainerID return container ID.
-func (r RequestInfo) ContainerID() cid.ID {
- return r.idCnr
-}
-
-// CleanBearer forces cleaning bearer token information.
-func (r *RequestInfo) CleanBearer() {
- r.bearer = nil
-}
-
-// Bearer returns bearer token of the request.
-func (r RequestInfo) Bearer() *bearer.Token {
- return r.bearer
-}
-
-// BasicACL returns basic ACL of the container.
-func (r RequestInfo) BasicACL() acl.Basic {
- return r.basicACL
-}
-
-// SenderKey returns public key of the request's sender.
-func (r RequestInfo) SenderKey() []byte {
- return r.senderKey
-}
-
-// Operation returns request's operation.
-func (r RequestInfo) Operation() acl.Op {
- return r.operation
-}
-
-// RequestRole returns request sender's role.
-func (r RequestInfo) RequestRole() acl.Role {
- return r.requestRole
-}
-
-// IsSoftAPECheck states if APE should perform soft checks.
-// Soft APE check allows a request if CheckAPE returns NoRuleFound for it,
-// otherwise it denies the request.
-func (r RequestInfo) IsSoftAPECheck() bool {
- return r.BasicACL().Bits() != 0
-}
-
-// MetaWithToken groups session and bearer tokens,
-// verification header and raw API request.
-type MetaWithToken struct {
- vheader *sessionV2.RequestVerificationHeader
- token *sessionSDK.Object
- bearer *bearer.Token
- src any
-}
-
-// RequestOwner returns ownerID and its public key
-// according to internal meta information.
-func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
- if r.vheader == nil {
- return nil, nil, errEmptyVerificationHeader
- }
-
- if r.bearer != nil && r.bearer.Impersonate() {
- return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes())
- }
-
- // if session token is presented, use it as truth source
- if r.token != nil {
- // verify signature of session token
- return ownerFromToken(r.token)
- }
-
- // otherwise get original body signature
- bodySignature := originalBodySignature(r.vheader)
- if bodySignature == nil {
- return nil, nil, errEmptyBodySig
- }
-
- return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
-}
-
-func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
- key, err := unmarshalPublicKey(rawKey)
- if err != nil {
- return nil, nil, fmt.Errorf("invalid signature key: %w", err)
- }
-
- var idSender user.ID
- user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
-
- return &idSender, key, nil
-}
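The removed RequestOwner fixed a precedence order for deciding whose identity a request carries. Restated for reference (a summary of the deleted code, not something that remains in the tree):

    // requestOwnerSource summarizes the precedence in the removed
    // MetaWithToken.RequestOwner: an impersonating bearer token wins, then a
    // verified session token, then the original body signature.
    func requestOwnerSource(hasImpersonatingBearer, hasSessionToken bool) string {
        switch {
        case hasImpersonatingBearer:
            return "bearer token signing key"
        case hasSessionToken:
            return "session token owner"
        default:
            return "original body signature key"
        }
    }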
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
deleted file mode 100644
index 5a8e8b065..000000000
--- a/pkg/services/object/acl/v2/service.go
+++ /dev/null
@@ -1,919 +0,0 @@
-package v2
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "go.uber.org/zap"
-)
-
-// Service checks basic ACL rules.
-type Service struct {
- *cfg
-
- c objectCore.SenderClassifier
-}
-
-type putStreamBasicChecker struct {
- source *Service
- next object.PutObjectStream
-}
-
-type patchStreamBasicChecker struct {
- source *Service
- next object.PatchObjectStream
- nonFirstSend bool
-}
-
-type getStreamBasicChecker struct {
- checker ACLChecker
-
- object.GetObjectStream
-
- info RequestInfo
-}
-
-type rangeStreamBasicChecker struct {
- checker ACLChecker
-
- object.GetObjectRangeStream
-
- info RequestInfo
-}
-
-type searchStreamBasicChecker struct {
- checker ACLChecker
-
- object.SearchStream
-
- info RequestInfo
-}
-
-// Option represents Service constructor option.
-type Option func(*cfg)
-
-type cfg struct {
- log *logger.Logger
-
- containers container.Source
-
- checker ACLChecker
-
- irFetcher InnerRingFetcher
-
- nm netmap.Source
-
- next object.ServiceServer
-}
-
-// New is a constructor for object ACL checking service.
-func New(next object.ServiceServer,
- nm netmap.Source,
- irf InnerRingFetcher,
- acl ACLChecker,
- cs container.Source,
- opts ...Option,
-) Service {
- cfg := &cfg{
- log: &logger.Logger{Logger: zap.L()},
- next: next,
- nm: nm,
- irFetcher: irf,
- checker: acl,
- containers: cs,
- }
-
- for i := range opts {
- opts[i](cfg)
- }
-
- return Service{
- cfg: cfg,
- c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
- }
-}
-
-// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context.
-// This allows to retrieve already calculated immutable request-specific values in next handler invocation.
-type wrappedGetObjectStream struct {
- object.GetObjectStream
-
- requestInfo RequestInfo
-}
-
-func (w *wrappedGetObjectStream) Context() context.Context {
- return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{
- Namespace: w.requestInfo.ContainerNamespace(),
- ContainerOwner: w.requestInfo.ContainerOwner(),
- SenderKey: w.requestInfo.SenderKey(),
- Role: w.requestInfo.RequestRole(),
- SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
- BearerToken: w.requestInfo.Bearer(),
- })
-}
-
-func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream {
- return &wrappedGetObjectStream{
- GetObjectStream: getObjectStream,
- requestInfo: reqInfo,
- }
-}
-
-// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context.
-// This allows to retrieve already calculated immutable request-specific values in next handler invocation.
-type wrappedRangeStream struct {
- object.GetObjectRangeStream
-
- requestInfo RequestInfo
-}
-
-func (w *wrappedRangeStream) Context() context.Context {
- return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{
- Namespace: w.requestInfo.ContainerNamespace(),
- ContainerOwner: w.requestInfo.ContainerOwner(),
- SenderKey: w.requestInfo.SenderKey(),
- Role: w.requestInfo.RequestRole(),
- SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
- BearerToken: w.requestInfo.Bearer(),
- })
-}
-
-func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream {
- return &wrappedRangeStream{
- GetObjectRangeStream: rangeStream,
- requestInfo: reqInfo,
- }
-}
-
-// wrappedSearchStream propagates RequestContext into SearchStream's context.
-// This allows retrieving already-calculated immutable request-specific values in the next handler invocation.
-type wrappedSearchStream struct {
- object.SearchStream
-
- requestInfo RequestInfo
-}
-
-func (w *wrappedSearchStream) Context() context.Context {
- return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{
- Namespace: w.requestInfo.ContainerNamespace(),
- ContainerOwner: w.requestInfo.ContainerOwner(),
- SenderKey: w.requestInfo.SenderKey(),
- Role: w.requestInfo.RequestRole(),
- SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
- BearerToken: w.requestInfo.Bearer(),
- })
-}
-
-func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream {
- return &wrappedSearchStream{
- SearchStream: searchStream,
- requestInfo: reqInfo,
- }
-}
-
-// Get implements ServiceServer interface, makes ACL checks and calls
-// next Get method in the ServiceServer pipeline.
-func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
- }
-
- return b.next.Get(request, &getStreamBasicChecker{
- GetObjectStream: newWrappedGetObjectStreamStream(stream, reqInfo),
- info: reqInfo,
- checker: b.checker,
- })
-}
-
-func (b Service) Put() (object.PutObjectStream, error) {
- streamer, err := b.next.Put()
-
- return putStreamBasicChecker{
- source: &b,
- next: streamer,
- }, err
-}
-
-func (b Service) Patch() (object.PatchObjectStream, error) {
- streamer, err := b.next.Patch()
-
- return &patchStreamBasicChecker{
- source: &b,
- next: streamer,
- }, err
-}
-
-func (b Service) Head(
- ctx context.Context,
- request *objectV2.HeadRequest,
-) (*objectV2.HeadResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- resp, err := b.next.Head(requestContext(ctx, reqInfo), request)
- if err == nil {
- if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
- err = eACLErr(reqInfo, err)
- }
- }
-
- return resp, err
-}
-
-func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
- id, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, id, nil)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch)
- if err != nil {
- return err
- }
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
- }
-
- return b.next.Search(request, &searchStreamBasicChecker{
- checker: b.checker,
- SearchStream: newWrappedSearchStream(stream, reqInfo),
- info: reqInfo,
- })
-}
-
-func (b Service) Delete(
- ctx context.Context,
- request *objectV2.DeleteRequest,
-) (*objectV2.DeleteResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- return b.next.Delete(requestContext(ctx, reqInfo), request)
-}
-
-func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
- }
-
- return b.next.GetRange(request, &rangeStreamBasicChecker{
- checker: b.checker,
- GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo),
- info: reqInfo,
- })
-}
-
-func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context {
- return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{
- Namespace: reqInfo.ContainerNamespace(),
- ContainerOwner: reqInfo.ContainerOwner(),
- SenderKey: reqInfo.SenderKey(),
- Role: reqInfo.RequestRole(),
- SoftAPECheck: reqInfo.IsSoftAPECheck(),
- BearerToken: reqInfo.Bearer(),
- })
-}
-
-func (b Service) GetRangeHash(
- ctx context.Context,
- request *objectV2.GetRangeHashRequest,
-) (*objectV2.GetRangeHashResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- return b.next.GetRangeHash(requestContext(ctx, reqInfo), request)
-}
-
-func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID()
- if idV2 == nil {
- return nil, errors.New("missing object owner")
- }
-
- var idOwner user.ID
-
- err = idOwner.ReadFromV2(*idV2)
- if err != nil {
- return nil, fmt.Errorf("invalid object owner: %w", err)
- }
-
- obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID())
- if err != nil {
- return nil, err
- }
-
- var sTok *sessionSDK.Object
- sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
- if err != nil {
- return nil, err
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !b.checker.CheckBasicACL(reqInfo) || !b.checker.StickyBitCheck(reqInfo, idOwner) {
- return nil, basicACLErr(reqInfo)
- }
- if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
- }
-
- return b.next.PutSingle(requestContext(ctx, reqInfo), request)
-}
-
-func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
- body := request.GetBody()
- if body == nil {
- return errEmptyBody
- }
-
- part := body.GetObjectPart()
- if part, ok := part.(*objectV2.PutObjectPartInit); ok {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- idV2 := part.GetHeader().GetOwnerID()
- if idV2 == nil {
- return errors.New("missing object owner")
- }
-
- var idOwner user.ID
-
- err = idOwner.ReadFromV2(*idV2)
- if err != nil {
- return fmt.Errorf("invalid object owner: %w", err)
- }
-
- objV2 := part.GetObjectID()
- var obj *oid.ID
-
- if objV2 != nil {
- obj = new(oid.ID)
-
- err = obj.ReadFromV2(*objV2)
- if err != nil {
- return err
- }
- }
-
- var sTok *sessionSDK.Object
- sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
- if err != nil {
- return err
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if reqInfo.IsSoftAPECheck() {
- if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) {
- return basicACLErr(reqInfo)
- }
- }
-
- ctx = requestContext(ctx, reqInfo)
- }
-
- return p.next.Send(ctx, request)
-}
-
-func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
- var sTok *sessionSDK.Object
-
- if tokV2 != nil {
- sTok = new(sessionSDK.Object)
-
- err := sTok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
- // if session relates to object's removal, we don't check
- // relation of the tombstone to the session here since user
- // can't predict tomb's ID.
- err = assertSessionRelation(*sTok, cnr, nil)
- } else {
- err = assertSessionRelation(*sTok, cnr, obj)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- return sTok, nil
-}
-
-func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
- if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
- }
-
- return g.GetObjectStream.Send(resp)
-}
-
-func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
-
- return g.GetObjectRangeStream.Send(resp)
-}
-
-func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
-
- return g.SearchStream.Send(resp)
-}
-
-func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
- body := request.GetBody()
- if body == nil {
- return errEmptyBody
- }
-
- if !p.nonFirstSend {
- p.nonFirstSend = true
-
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- objV2 := request.GetBody().GetAddress().GetObjectID()
- if objV2 == nil {
- return errors.New("missing oid")
- }
- obj := new(oid.ID)
- err = obj.ReadFromV2(*objV2)
- if err != nil {
- return err
- }
-
- var sTok *sessionSDK.Object
- sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
- if err != nil {
- return err
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(req, cnr)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- ctx = requestContext(ctx, reqInfo)
- }
-
- return p.next.Send(ctx, request)
-}
-
-func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
- cnr, err := b.containers.Get(idCnr) // fetch actual container
- if err != nil {
- return info, err
- }
-
- if req.token != nil {
- currentEpoch, err := b.nm.Epoch()
- if err != nil {
- return info, errors.New("can't fetch current epoch")
- }
- if req.token.ExpiredAt(currentEpoch) {
- return info, new(apistatus.SessionTokenExpired)
- }
- if req.token.InvalidAt(currentEpoch) {
- return info, fmt.Errorf("%s: token is invalid at %d epoch)",
- invalidRequestMessage, currentEpoch)
- }
-
- if !assertVerb(*req.token, op) {
- return info, errInvalidVerb
- }
- }
-
- // find request role and key
- ownerID, ownerKey, err := req.RequestOwner()
- if err != nil {
- return info, err
- }
- res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
- if err != nil {
- return info, err
- }
-
- info.basicACL = cnr.Value.BasicACL()
- info.requestRole = res.Role
- info.operation = op
- info.cnrOwner = cnr.Value.Owner()
- info.idCnr = idCnr
-
- cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
- if hasNamespace {
- info.cnrNamespace = cnrNamespace
- }
-
- // it is assumed that at the moment the key will be valid,
- // otherwise the request would not pass validation
- info.senderKey = res.Key
-
- // add bearer token if it is present in request
- info.bearer = req.bearer
-
- info.srcRequest = req.src
-
- return info, nil
-}
-
-// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert.
-func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
- cnr, err := b.containers.Get(idCnr) // fetch actual container
- if err != nil {
- return info, err
- }
-
- if req.token != nil {
- currentEpoch, err := b.nm.Epoch()
- if err != nil {
- return info, errors.New("can't fetch current epoch")
- }
- if req.token.ExpiredAt(currentEpoch) {
- return info, new(apistatus.SessionTokenExpired)
- }
- if req.token.InvalidAt(currentEpoch) {
- return info, fmt.Errorf("%s: token is invalid at %d epoch)",
- invalidRequestMessage, currentEpoch)
- }
- }
-
- // find request role and key
- ownerID, ownerKey, err := req.RequestOwner()
- if err != nil {
- return info, err
- }
- res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
- if err != nil {
- return info, err
- }
-
- info.basicACL = cnr.Value.BasicACL()
- info.requestRole = res.Role
- info.cnrOwner = cnr.Value.Owner()
- info.idCnr = idCnr
-
- cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
- if hasNamespace {
- info.cnrNamespace = cnrNamespace
- }
-
- // it is assumed that at the moment the key will be valid,
- // otherwise the request would not pass validation
- info.senderKey = res.Key
-
- // add bearer token if it is present in request
- info.bearer = req.bearer
-
- info.srcRequest = req.src
-
- return info, nil
-}
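Note: the wrapped stream types removed above (wrappedGetObjectStream, wrappedRangeStream, wrappedSearchStream) all rely on the same technique: embed the stream interface and override only Context(), so request data computed once is visible to the next handler in the pipeline. A minimal, self-contained sketch of that pattern, with all names illustrative rather than taken from frostfs:

package main

import (
	"context"
	"fmt"
)

type ctxKey struct{}

// stream is a stand-in for a gRPC-style server stream.
type stream interface {
	Context() context.Context
}

type baseStream struct{ ctx context.Context }

func (b baseStream) Context() context.Context { return b.ctx }

// wrappedStream embeds the original stream and overrides only Context(),
// so every other method is still served by the wrapped value.
type wrappedStream struct {
	stream
	requestInfo string // precomputed, immutable request data
}

func (w wrappedStream) Context() context.Context {
	return context.WithValue(w.stream.Context(), ctxKey{}, w.requestInfo)
}

func main() {
	var s stream = baseStream{ctx: context.Background()}
	s = wrappedStream{stream: s, requestInfo: "role=owner"}
	// A downstream handler reads the precomputed value from the stream context.
	fmt.Println(s.Context().Value(ctxKey{})) // role=owner
}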
diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go
deleted file mode 100644
index 061cd26b6..000000000
--- a/pkg/services/object/acl/v2/types.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-// ACLChecker is an interface that must provide
-// ACL related checks.
-type ACLChecker interface {
- // CheckBasicACL must return true only if request
- // passes basic ACL validation.
- CheckBasicACL(RequestInfo) bool
- // CheckEACL must return non-nil error if request
- // doesn't pass extended ACL validation.
- CheckEACL(any, RequestInfo) error
- // StickyBitCheck must return true only if sticky bit
- // is disabled or enabled but request contains correct
- // owner field.
- StickyBitCheck(RequestInfo, user.ID) bool
-}
-
-// InnerRingFetcher is an interface that must provide
-// Inner Ring information.
-type InnerRingFetcher interface {
- // InnerRingKeys must return list of public keys of
- // the actual inner ring.
- InnerRingKeys() ([][]byte, error)
-}
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
deleted file mode 100644
index 435339683..000000000
--- a/pkg/services/object/acl/v2/util_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package v2
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
- aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestOriginalTokens(t *testing.T) {
- sToken := sessiontest.ObjectSigned()
- bToken := bearertest.Token()
-
- pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- require.NoError(t, bToken.Sign(*pk))
-
- var bTokenV2 acl.BearerToken
- bToken.WriteToV2(&bTokenV2)
- // This line is needed because SDK uses some custom format for
- // reserved filters, so `cid.ID` is not converted to string immediately.
- require.NoError(t, bToken.ReadFromV2(bTokenV2))
-
- var sTokenV2 session.Token
- sToken.WriteToV2(&sTokenV2)
-
- for i := range 10 {
- metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
- res, err := originalSessionToken(metaHeaders)
- require.NoError(t, err)
- require.Equal(t, sToken, res, i)
-
- bTok, err := originalBearerToken(metaHeaders)
- require.NoError(t, err)
- require.Equal(t, &bToken, bTok, i)
- }
-}
-
-func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
- metaHeader := new(session.RequestMetaHeader)
- metaHeader.SetBearerToken(b)
- metaHeader.SetSessionToken(s)
-
- for i := uint32(0); i < depth; i++ {
- link := metaHeader
- metaHeader = new(session.RequestMetaHeader)
- metaHeader.SetOrigin(link)
- }
-
- return metaHeader
-}
-
-func TestIsVerbCompatible(t *testing.T) {
- // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
- table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
- aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
- aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
- aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet},
- aclsdk.OpObjectHead: {
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- },
- aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
- aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash},
- aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
- }
-
- verbs := []sessionSDK.ObjectVerb{
- sessionSDK.VerbObjectPut,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectSearch,
- }
-
- var tok sessionSDK.Object
-
- for op, list := range table {
- for _, verb := range verbs {
- var contains bool
- for _, v := range list {
- if v == verb {
- contains = true
- break
- }
- }
-
- tok.ForVerb(verb)
-
- require.Equal(t, contains, assertVerb(tok, op),
- "%v in token, %s executing", verb, op)
- }
- }
-}
-
-func TestAssertSessionRelation(t *testing.T) {
- var tok sessionSDK.Object
- cnr := cidtest.ID()
- cnrOther := cidtest.ID()
- obj := oidtest.ID()
- objOther := oidtest.ID()
-
- // make sure ids differ, otherwise test won't work correctly
- require.False(t, cnrOther.Equals(cnr))
- require.False(t, objOther.Equals(obj))
-
- // bind session to the container (required)
- tok.BindContainer(cnr)
-
- // test container-global session
- require.NoError(t, assertSessionRelation(tok, cnr, nil))
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnrOther, nil))
- require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
-
- // limit the session to the particular object
- tok.LimitByObjects(obj)
-
- // test fixed object session (here obj arg must be non-nil everywhere)
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnr, &objOther))
-}
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index 3688638d0..bb6067a37 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -5,12 +5,12 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -64,11 +64,8 @@ type Prm struct {
// An encoded container's owner user ID.
ContainerOwner user.ID
- // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
- SoftAPECheck bool
-
- // If true, object headers will not be retrieved from the storage engine.
- WithoutHeaderRequest bool
+ // Attributes defined for the container.
+ ContainerAttributes map[string]string
// The request's bearer token. It is used in order to check APE overrides with the token.
BearerToken *bearer.Token
@@ -82,9 +79,10 @@ var errMissingOID = errors.New("object ID is not set")
// CheckAPE prepares an APE-request and checks if it is permitted by policies.
func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
// APE check is ignored for some inter-node requests.
- if prm.Role == nativeschema.PropertyValueContainerRoleContainer {
+ switch prm.Role {
+ case nativeschema.PropertyValueContainerRoleContainer:
return nil
- } else if prm.Role == nativeschema.PropertyValueContainerRoleIR {
+ case nativeschema.PropertyValueContainerRoleIR:
switch prm.Method {
case nativeschema.MethodGetObject,
nativeschema.MethodHeadObject,
@@ -105,13 +103,12 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
return err
}
- return c.checkerCore.CheckAPE(checkercore.CheckPrm{
+ return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{
Request: r,
PublicKey: pub,
- Namespace: prm.Method,
+ Namespace: prm.Namespace,
Container: prm.Container,
ContainerOwner: prm.ContainerOwner,
BearerToken: prm.BearerToken,
- SoftAPECheck: prm.SoftAPECheck,
})
}
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
index 090f6a83c..97eb2b2d7 100644
--- a/pkg/services/object/ape/checker_test.go
+++ b/pkg/services/object/ape/checker_test.go
@@ -8,13 +8,13 @@ import (
"fmt"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
return pk.GetScriptHash()
}
-func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) {
+func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
v, ok := f.subjects[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e
return v, nil
}
-func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
v, ok := f.subjectsExtended[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -619,21 +619,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(s.currentEpoch - diff)
+ return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
}
-func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch() (uint64, error) {
+func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
return s.currentEpoch, nil
}
@@ -641,18 +641,18 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container
}
-func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
-func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
return nil, nil
}
-func TestPutECChunk(t *testing.T) {
+func TestGetECChunk(t *testing.T) {
headerProvider := newHeaderProviderMock()
frostfsidProvider := newFrostfsIDProviderMock(t)
@@ -666,11 +666,10 @@ func TestPutECChunk(t *testing.T) {
Rules: []chain.Rule{
{
Status: chain.AccessDenied,
- Actions: chain.Actions{Names: methodsOptionalOID},
+ Actions: chain.Actions{Names: methodsRequiredOID},
Resources: chain.Resources{
Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
},
- Any: true,
Condition: []chain.Condition{
{
Op: chain.CondStringEquals,
@@ -680,21 +679,32 @@ func TestPutECChunk(t *testing.T) {
},
},
},
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ },
},
- MatchType: chain.MatchTypeFirstMatch,
})
node1Key, err := keys.NewPrivateKey()
require.NoError(t, err)
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey(node1Key.PublicKey().Bytes())
+ node2Key, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ node2 := netmapSDK.NodeInfo{}
+ node2.SetPublicKey(node2Key.PublicKey().Bytes())
netmap := &netmapSDK.NetMap{}
netmap.SetEpoch(100)
- netmap.SetNodes([]netmapSDK.NodeInfo{node1})
+ netmap.SetNodes([]netmapSDK.NodeInfo{node1, node2})
nm := &netmapStub{
currentEpoch: 100,
netmaps: map[uint64]*netmapSDK.NetMap{
+ 99: netmap,
100: netmap,
},
}
@@ -702,7 +712,7 @@ func TestPutECChunk(t *testing.T) {
cont := containerSDK.Container{}
cont.Init()
pp := netmapSDK.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
+ require.NoError(t, pp.DecodeString("EC 1.1"))
cont.SetPlacementPolicy(pp)
cs := &testContainerSource{
containers: map[cid.ID]*container.Container{
@@ -718,7 +728,7 @@ func TestPutECChunk(t *testing.T) {
chunkHeader := newHeaderObjectSDK(cnr, obj, nil).ToV2().GetHeader()
ecHeader := object.ECHeader{
Index: 1,
- Total: 5,
+ Total: 2,
Parent: &refs.ObjectID{},
}
chunkHeader.SetEC(&ecHeader)
@@ -737,32 +747,33 @@ func TestPutECChunk(t *testing.T) {
})
headerProvider.addHeader(cnr, ecParentID, parentHeader)
- t.Run("access denied for container node", func(t *testing.T) {
+ // the container node fetches the EC parent header, so the deny rule matches its attribute key/value and access is denied
+ t.Run("access denied on container node", func(t *testing.T) {
prm := Prm{
- Method: nativeschema.MethodPutObject,
- Container: cnr,
- Object: obj,
- Role: role,
- SenderKey: senderKey,
- Header: chunkHeader,
- SoftAPECheck: true,
+ Method: nativeschema.MethodGetObject,
+ Container: cnr,
+ Object: obj,
+ Role: role,
+ SenderKey: hex.EncodeToString(node2Key.PublicKey().Bytes()),
+ Header: chunkHeader,
}
err = checker.CheckAPE(context.Background(), prm)
require.Error(t, err)
})
- t.Run("access allowed for non container node", func(t *testing.T) {
+
+ // a non-container node has no access rights to fetch the EC parent header, so it falls back to the EC chunk header
+ t.Run("access allowed on non container node", func(t *testing.T) {
otherKey, err := keys.NewPrivateKey()
require.NoError(t, err)
checker = NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, otherKey.PublicKey().Bytes())
prm := Prm{
- Method: nativeschema.MethodPutObject,
- Container: cnr,
- Object: obj,
- Role: nativeschema.PropertyValueContainerRoleOthers,
- SenderKey: senderKey,
- Header: chunkHeader,
- SoftAPECheck: true,
+ Method: nativeschema.MethodGetObject,
+ Container: cnr,
+ Object: obj,
+ Role: nativeschema.PropertyValueContainerRoleOthers,
+ SenderKey: senderKey,
+ Header: chunkHeader,
}
err = checker.CheckAPE(context.Background(), prm)
diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
index 1b2024ed5..82e660a7f 100644
--- a/pkg/services/object/ape/errors.go
+++ b/pkg/services/object/ape/errors.go
@@ -1,10 +1,34 @@
package ape
import (
+ "errors"
+
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
+var (
+ errMissingContainerID = malformedRequestError("missing container ID")
+ errEmptyVerificationHeader = malformedRequestError("empty verification header")
+ errEmptyBodySig = malformedRequestError("empty body signature")
+ errInvalidSessionSig = malformedRequestError("invalid session token signature")
+ errInvalidSessionOwner = malformedRequestError("invalid session token owner")
+ errInvalidVerb = malformedRequestError("session token verb is invalid")
+)
+
+func malformedRequestError(reason string) error {
+ invalidArgErr := &apistatus.InvalidArgument{}
+ invalidArgErr.SetMessage(reason)
+ return invalidArgErr
+}
+
func toStatusErr(err error) error {
+ var chRouterErr *checkercore.ChainRouterError
+ if !errors.As(err, &chRouterErr) {
+ errServerInternal := &apistatus.ServerInternal{}
+ apistatus.WriteInternalServerErr(errServerInternal, err)
+ return errServerInternal
+ }
errAccessDenied := &apistatus.ObjectAccessDenied{}
errAccessDenied.WriteReason("ape denied request: " + err.Error())
return errAccessDenied
}
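Note: the rewritten toStatusErr above narrows access-denied responses to errors that actually originate from the APE chain router; everything else becomes an internal server error. The mechanism is errors.As against a concrete error type, which walks the whole wrap chain. A self-contained sketch of the idiom with illustrative stand-in types:

package main

import (
	"errors"
	"fmt"
)

type chainRouterError struct{ rule string }

func (e *chainRouterError) Error() string { return "denied by rule " + e.rule }

// toStatus mirrors the shape of toStatusErr: only router denials map to
// "access denied"; every other failure is treated as internal.
func toStatus(err error) string {
	var denied *chainRouterError
	if !errors.As(err, &denied) {
		return "internal server error: " + err.Error()
	}
	return "access denied: " + err.Error()
}

func main() {
	err := fmt.Errorf("ape check: %w", &chainRouterError{rule: "deny-get"})
	fmt.Println(toStatus(err))                      // access denied: ...
	fmt.Println(toStatus(errors.New("db timeout"))) // internal server error: ...
}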
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
new file mode 100644
index 000000000..102985aa6
--- /dev/null
+++ b/pkg/services/object/ape/metadata.go
@@ -0,0 +1,179 @@
+package ape
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
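+// Metadata groups the request data needed for APE checks: the addressed
+// container and object, the meta and verification headers, and the decoded
+// session and bearer tokens.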
+type Metadata struct {
+ Container cid.ID
+ Object *oid.ID
+ MetaHeader *session.RequestMetaHeader
+ VerificationHeader *session.RequestVerificationHeader
+ SessionToken *sessionSDK.Object
+ BearerToken *bearer.Token
+}
+
+func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+ if m.VerificationHeader == nil {
+ return nil, nil, errEmptyVerificationHeader
+ }
+
+ if m.BearerToken != nil && m.BearerToken.Impersonate() {
+ return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
+ }
+
+ // if a session token is present, use it as the source of truth
+ if m.SessionToken != nil {
+ // verify signature of session token
+ return ownerFromToken(m.SessionToken)
+ }
+
+ // otherwise get original body signature
+ bodySignature := originalBodySignature(m.VerificationHeader)
+ if bodySignature == nil {
+ return nil, nil, errEmptyBodySig
+ }
+
+ return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+// RequestInfo contains request information extracted from request metadata.
+type RequestInfo struct {
+ // Role defines under which role this request is executed.
+ // It must be represented only as a constant represented in native schema.
+ Role string
+
+ ContainerOwner user.ID
+
+ ContainerAttributes map[string]string
+
+ // Namespace defines the namespace the container belongs to.
+ Namespace string
+
+ // HEX-encoded sender key.
+ SenderKey string
+}
+
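+// RequestInfoExtractor produces RequestInfo from request metadata,
+// verifying the session token against the given method name.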
+type RequestInfoExtractor interface {
+ GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
+}
+
+type extractor struct {
+ containers container.Source
+
+ nm netmap.Source
+
+ classifier objectCore.SenderClassifier
+}
+
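+// NewRequestInfoExtractor is a constructor for the default RequestInfoExtractor implementation.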
+func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
+ return &extractor{
+ containers: containers,
+ nm: nm,
+ classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
+ }
+}
+
+func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
+ currentEpoch, err := e.nm.Epoch(ctx)
+ if err != nil {
+ return errors.New("can't fetch current epoch")
+ }
+ if sessionToken.ExpiredAt(currentEpoch) {
+ return new(apistatus.SessionTokenExpired)
+ }
+ if sessionToken.InvalidAt(currentEpoch) {
+ return fmt.Errorf("malformed request: token is invalid at epoch %d", currentEpoch)
+ }
+ if !assertVerb(*sessionToken, method) {
+ return errInvalidVerb
+ }
+ return nil
+}
+
+func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
+ cnr, err := e.containers.Get(ctx, m.Container)
+ if err != nil {
+ return ri, err
+ }
+
+ if m.SessionToken != nil {
+ if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
+ return ri, err
+ }
+ }
+
+ ownerID, ownerKey, err := m.RequestOwner()
+ if err != nil {
+ return ri, err
+ }
+ res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
+ if err != nil {
+ return ri, err
+ }
+
+ ri.Role = nativeSchemaRole(res.Role)
+ ri.ContainerOwner = cnr.Value.Owner()
+
+ ri.ContainerAttributes = map[string]string{}
+ for key, val := range cnr.Value.Attributes() {
+ ri.ContainerAttributes[key] = val
+ }
+
+ cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+ if hasNamespace {
+ ri.Namespace = cnrNamespace
+ }
+
+ // the key is assumed to be valid at this point,
+ // otherwise the request would not have passed validation
+ ri.SenderKey = hex.EncodeToString(res.Key)
+
+ return ri, nil
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+ var sTok *sessionSDK.Object
+
+ if tokV2 != nil {
+ sTok = new(sessionSDK.Object)
+
+ err := sTok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+ // if the session relates to the object's removal, we don't check
+ // the tombstone's relation to the session here, since the user
+ // can't predict the tombstone's ID.
+ err = assertSessionRelation(*sTok, cnr, nil)
+ } else {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return sTok, nil
+}
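Note: Metadata.RequestOwner above resolves the acting identity with a fixed precedence: an impersonating bearer token wins, then a session token, and only then the original body signature; the renamed tests in metadata_test.go below exercise exactly this order. A minimal sketch of the precedence chain with illustrative types (not the SDK ones):

package main

import (
	"errors"
	"fmt"
)

type metadata struct {
	bearerImpersonates bool
	bearerKey          string
	sessionKey         string // empty means no session token
	bodySignatureKey   string // empty means unsigned body
}

// requestOwner applies the same precedence as Metadata.RequestOwner:
// impersonating bearer > session token > original body signature.
func requestOwner(m metadata) (string, error) {
	if m.bearerImpersonates {
		return m.bearerKey, nil
	}
	if m.sessionKey != "" {
		return m.sessionKey, nil
	}
	if m.bodySignatureKey == "" {
		return "", errors.New("empty body signature")
	}
	return m.bodySignatureKey, nil
}

func main() {
	owner, _ := requestOwner(metadata{bearerImpersonates: true, bearerKey: "container-owner", sessionKey: "user"})
	fmt.Println(owner) // container-owner: impersonation wins over the session token
}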
diff --git a/pkg/services/object/acl/v2/request_test.go b/pkg/services/object/ape/metadata_test.go
similarity index 79%
rename from pkg/services/object/acl/v2/request_test.go
rename to pkg/services/object/ape/metadata_test.go
index 980d1a2e5..fd919008f 100644
--- a/pkg/services/object/acl/v2/request_test.go
+++ b/pkg/services/object/ape/metadata_test.go
@@ -1,11 +1,11 @@
-package v2
+package ape
import (
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) {
vh.SetBodySignature(&userSignature)
t.Run("empty verification header", func(t *testing.T) {
- req := MetaWithToken{}
+ req := Metadata{}
checkOwner(t, req, nil, errEmptyVerificationHeader)
})
t.Run("empty verification header signature", func(t *testing.T) {
- req := MetaWithToken{
- vheader: new(sessionV2.RequestVerificationHeader),
+ req := Metadata{
+ VerificationHeader: new(sessionV2.RequestVerificationHeader),
}
checkOwner(t, req, nil, errEmptyBodySig)
})
t.Run("no tokens", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
+ req := Metadata{
+ VerificationHeader: vh,
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer without impersonate, no session", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
- bearer: newBearer(t, containerOwner, userID, false),
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, false),
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer with impersonate, no session", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
- bearer: newBearer(t, containerOwner, userID, true),
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, true),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
- req := MetaWithToken{
- vheader: vh,
- bearer: newBearer(t, containerOwner, userID, true),
- token: newSession(t, pk),
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, true),
+ SessionToken: newSession(t, pk),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
t.Run("with session", func(t *testing.T) {
- req := MetaWithToken{
- vheader: vh,
- token: newSession(t, containerOwner),
+ req := Metadata{
+ VerificationHeader: vh,
+ SessionToken: newSession(t, containerOwner),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) {
var tok sessionSDK.Object
require.NoError(t, tok.ReadFromV2(tokV2))
- req := MetaWithToken{
- vheader: vh,
- token: &tok,
+ req := Metadata{
+ VerificationHeader: vh,
+ SessionToken: &tok,
}
checkOwner(t, req, nil, errInvalidSessionOwner)
})
@@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool
return &tok
}
-func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) {
+func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
_, actual, err := req.RequestOwner()
if expectedErr != nil {
require.ErrorIs(t, err, expectedErr)
diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go
index da5307ca7..39dd7f476 100644
--- a/pkg/services/object/ape/request.go
+++ b/pkg/services/object/ape/request.go
@@ -3,14 +3,16 @@ package ape
import (
"context"
"crypto/sha256"
+ "errors"
"fmt"
"net"
"strconv"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -24,6 +26,8 @@ import (
var defaultRequest = aperequest.Request{}
+var errECMissingParentObjectID = errors.New("missing EC parent object ID")
+
func nativeSchemaRole(role acl.Role) string {
switch role {
case acl.RoleOwner:
@@ -53,11 +57,16 @@ func resourceName(cid cid.ID, oid *oid.ID, namespace string) string {
}
// objectProperties collects object properties from address parameters and a header if it is passed.
-func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string {
+func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string {
objectProps := map[string]string{
nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
}
+ for attrName, attrValue := range cnrAttrs {
+ prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName)
+ objectProps[prop] = attrValue
+ }
+
objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString()
if oid != nil {
@@ -116,13 +125,16 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
var header *objectV2.Header
if prm.Header != nil {
header = prm.Header
- } else if prm.Object != nil && !prm.WithoutHeaderRequest {
+ } else if prm.Object != nil {
headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object, true)
if err == nil {
header = headerObjSDK.ToV2().GetHeader()
}
}
- header = c.fillHeaderWithECParent(ctx, prm, header)
+ header, err := c.fillHeaderWithECParent(ctx, prm, header)
+ if err != nil {
+ return defaultRequest, fmt.Errorf("get EC parent header: %w", err)
+ }
reqProps := map[string]string{
nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
nativeschema.PropertyKeyActorRole: prm.Role,
@@ -133,8 +145,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
reqProps[xheadKey] = xhead.GetValue()
}
- var err error
- reqProps, err = c.fillWithUserClaimTags(reqProps, prm)
+ reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm)
if err != nil {
return defaultRequest, err
}
@@ -149,50 +160,58 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
prm.Method,
aperequest.NewResource(
resourceName(prm.Container, prm.Object, prm.Namespace),
- objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
+ objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header),
),
reqProps,
), nil
}
-func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) *objectV2.Header {
+func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) (*objectV2.Header, error) {
if header == nil {
- return header
+ return header, nil
}
if header.GetEC() == nil {
- return header
- }
- if prm.Role == nativeschema.PropertyValueContainerRoleContainer ||
- prm.Role == nativeschema.PropertyValueContainerRoleIR {
- return header
+ return header, nil
}
parentObjRefID := header.GetEC().Parent
if parentObjRefID == nil {
- return header
+ return nil, errECMissingParentObjectID
}
var parentObjID oid.ID
if err := parentObjID.ReadFromV2(*parentObjRefID); err != nil {
- return header
+ return nil, fmt.Errorf("EC parent object ID format error: %w", err)
}
 // only container nodes have access to collect the parent object
- contNode, err := c.currentNodeIsContainerNode(prm.Container)
- if err != nil || !contNode {
- return header
+ contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container)
+ if err != nil {
+ return nil, fmt.Errorf("check container node status: %w", err)
+ }
+ if !contNode {
+ return header, nil
}
parentObj, err := c.headerProvider.GetHeader(ctx, prm.Container, parentObjID, false)
if err != nil {
- return header
+ if isLogicalError(err) {
+ return header, nil
+ }
+ return nil, fmt.Errorf("EC parent header request: %w", err)
}
- return parentObj.ToV2().GetHeader()
+ return parentObj.ToV2().GetHeader(), nil
}
-func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
- cnr, err := c.cnrSource.Get(cnrID)
+func isLogicalError(err error) bool {
+ var errObjRemoved *apistatus.ObjectAlreadyRemoved
+ var errObjNotFound *apistatus.ObjectNotFound
+ return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound)
+}
+
+func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) {
+ cnr, err := c.cnrSource.Get(ctx, cnrID)
if err != nil {
return false, err
}
- nm, err := netmap.GetLatestNetworkMap(c.nm)
+ nm, err := netmap.GetLatestNetworkMap(ctx, c.nm)
if err != nil {
return false, err
}
@@ -206,7 +225,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
return true, nil
}
- nm, err = netmap.GetPreviousNetworkMap(c.nm)
+ nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
if err != nil {
return false, err
}
@@ -215,7 +234,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
}
 // fillWithUserClaimTags fills APE request properties with user claim tags, fetching them from the frostfsid contract by the actor's public key.
-func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) {
+func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
@@ -223,7 +242,7 @@ func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm)
if err != nil {
return nil, err
}
- props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
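Note: objectProperties above now folds the container's attributes into the APE resource properties, one property per attribute, keyed by a printf-style format from the native schema. A self-contained sketch of the mapping; the exact key strings here are assumptions standing in for nativeschema constants such as PropertyKeyFormatObjectContainerAttribute:

package main

import "fmt"

// Assumed stand-in for nativeschema.PropertyKeyFormatObjectContainerAttribute.
const propertyKeyFormatContainerAttribute = "$Object:containerAttribute/%s"

func objectProps(containerID string, cnrAttrs map[string]string) map[string]string {
	// Assumed stand-in key for the container ID property.
	props := map[string]string{"$Object:containerID": containerID}
	for name, value := range cnrAttrs {
		props[fmt.Sprintf(propertyKeyFormatContainerAttribute, name)] = value
	}
	return props
}

func main() {
	props := objectProps("cnr1", map[string]string{"Name": "unittest", "Zone": "eggplant"})
	fmt.Println(props["$Object:containerAttribute/Name"]) // unittest
}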
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
index 9dad69d17..fcf7c4c40 100644
--- a/pkg/services/object/ape/request_test.go
+++ b/pkg/services/object/ape/request_test.go
@@ -6,8 +6,9 @@ import (
"net"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -19,11 +20,20 @@ import (
)
const (
- testOwnerID = "FPPtmAi9TCX329"
+ testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y"
incomingIP = "192.92.33.1"
+
+ testSysAttrName = "unittest"
+
+ testSysAttrZone = "eggplant"
)
+var containerAttrs = map[string]string{
+ cnrV2.SysAttributeName: testSysAttrName,
+ cnrV2.SysAttributeZone: testSysAttrZone,
+}
+
func ctxWithPeerInfo() context.Context {
return peer.NewContext(context.Background(), &peer.Peer{
Addr: &net.TCPAddr{
@@ -105,7 +115,7 @@ func TestObjectProperties(t *testing.T) {
var testCnrOwner user.ID
require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
- props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader())
+ props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader())
require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID])
require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID])
@@ -124,6 +134,8 @@ func TestObjectProperties(t *testing.T) {
require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType])
require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash])
require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash])
+ require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)])
+ require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)])
for _, attr := range test.header.attributes {
require.Equal(t, attr.val, props[attr.key])
@@ -245,6 +257,10 @@ func TestNewAPERequest(t *testing.T) {
Role: role,
SenderKey: senderKey,
ContainerOwner: testCnrOwner,
+ ContainerAttributes: map[string]string{
+ cnrV2.SysAttributeZone: testSysAttrZone,
+ cnrV2.SysAttributeName: testSysAttrName,
+ },
}
headerSource := newHeaderProviderMock()
@@ -277,7 +293,7 @@ func TestNewAPERequest(t *testing.T) {
method,
aperequest.NewResource(
resourceName(cnr, obj, prm.Namespace),
- objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
+ objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header {
if headerObjSDK != nil {
return headerObjSDK.ToV2().GetHeader()
}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index a1634e7c5..5e04843f3 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -2,32 +2,25 @@ package ape
import (
"context"
- "encoding/hex"
- "errors"
- "fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)
-var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext")
-
type Service struct {
- log *logger.Logger
-
apeChecker Checker
+ extractor RequestInfoExtractor
+
next objectSvc.ServiceServer
}
@@ -67,10 +60,10 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
-func NewService(log *logger.Logger, apeChecker Checker, next objectSvc.ServiceServer) *Service {
+func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
return &Service{
- log: log,
apeChecker: apeChecker,
+ extractor: extractor,
next: next,
}
}
@@ -80,17 +73,9 @@ type getStreamBasicChecker struct {
apeChecker Checker
- namespace string
+ metadata Metadata
- senderKey []byte
-
- containerOwner user.ID
-
- role string
-
- softAPECheck bool
-
- bearerToken *bearer.Token
+ reqInfo RequestInfo
}
func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
@@ -101,17 +86,17 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
}
prm := Prm{
- Namespace: g.namespace,
- Container: cnrID,
- Object: objID,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodGetObject,
- SenderKey: hex.EncodeToString(g.senderKey),
- ContainerOwner: g.containerOwner,
- Role: g.role,
- SoftAPECheck: g.softAPECheck,
- BearerToken: g.bearerToken,
- XHeaders: resp.GetMetaHeader().GetXHeaders(),
+ Namespace: g.reqInfo.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodGetObject,
+ SenderKey: g.reqInfo.SenderKey,
+ ContainerOwner: g.reqInfo.ContainerOwner,
+ ContainerAttributes: g.reqInfo.ContainerAttributes,
+ Role: g.reqInfo.Role,
+ BearerToken: g.metadata.BearerToken,
+ XHeaders: resp.GetMetaHeader().GetXHeaders(),
}
if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil {
@@ -121,66 +106,54 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
return g.GetObjectStream.Send(resp)
}
-func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) {
- untyped := ctx.Value(objectSvc.RequestContextKey)
- if untyped == nil {
- return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey)
- }
- rc, ok := untyped.(*objectSvc.RequestContext)
- if !ok {
- return nil, errFailedToCastToRequestContext
- }
- return rc, nil
-}
-
func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
- reqCtx, err := requestContext(stream.Context())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
+ if err != nil {
+ return err
}
-
return c.next.Get(request, &getStreamBasicChecker{
GetObjectStream: stream,
apeChecker: c.apeChecker,
- namespace: reqCtx.Namespace,
- senderKey: reqCtx.SenderKey,
- containerOwner: reqCtx.ContainerOwner,
- role: nativeSchemaRole(reqCtx.Role),
- softAPECheck: reqCtx.SoftAPECheck,
- bearerToken: reqCtx.BearerToken,
+ metadata: md,
+ reqInfo: reqInfo,
})
}
type putStreamBasicChecker struct {
apeChecker Checker
+ extractor RequestInfoExtractor
+
next objectSvc.PutObjectStream
}
func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- reqCtx, err := requestContext(ctx)
+ md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
}
-
- cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
+ reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
if err != nil {
- return toStatusErr(err)
+ return err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodPutObject,
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- Role: nativeSchemaRole(reqCtx.Role),
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ Role: reqInfo.Role,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -195,11 +168,12 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Put() (objectSvc.PutObjectStream, error) {
- streamer, err := c.next.Put()
+func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
+ streamer, err := c.next.Put(ctx)
return &putStreamBasicChecker{
apeChecker: c.apeChecker,
+ extractor: c.extractor,
next: streamer,
}, err
}
@@ -207,6 +181,8 @@ func (c *Service) Put() (objectSvc.PutObjectStream, error) {
type patchStreamBasicChecker struct {
apeChecker Checker
+ extractor RequestInfoExtractor
+
next objectSvc.PatchObjectStream
nonFirstSend bool
@@ -216,27 +192,26 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa
if !p.nonFirstSend {
p.nonFirstSend = true
- reqCtx, err := requestContext(ctx)
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
}
-
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
if err != nil {
- return toStatusErr(err)
+ return err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodPatchObject,
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- Role: nativeSchemaRole(reqCtx.Role),
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodPatchObject,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ Role: reqInfo.Role,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -251,22 +226,22 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Patch() (objectSvc.PatchObjectStream, error) {
- streamer, err := c.next.Patch()
+func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
+ streamer, err := c.next.Patch(ctx)
return &patchStreamBasicChecker{
apeChecker: c.apeChecker,
+ extractor: c.extractor,
next: streamer,
}, err
}
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
if err != nil {
return nil, err
}
@@ -280,7 +255,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
switch headerPart := resp.GetBody().GetHeaderPart().(type) {
case *objectV2.ShortHeader:
cidV2 := new(refs.ContainerID)
- cnrID.WriteToV2(cidV2)
+ md.Container.WriteToV2(cidV2)
header.SetContainerID(cidV2)
header.SetVersion(headerPart.GetVersion())
header.SetCreationEpoch(headerPart.GetCreationEpoch())
@@ -296,17 +271,17 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Header: header,
- Method: nativeschema.MethodHeadObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: header,
+ Method: nativeschema.MethodHeadObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -315,28 +290,25 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
- var cnrID cid.ID
- if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil {
- if err := cnrID.ReadFromV2(*cnrV2); err != nil {
- return toStatusErr(err)
- }
- }
-
- reqCtx, err := requestContext(stream.Context())
+ md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
if err != nil {
- return toStatusErr(err)
+ return err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
+ if err != nil {
+ return err
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Method: nativeschema.MethodSearchObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Method: nativeschema.MethodSearchObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -346,27 +318,26 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc
}
func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
if err != nil {
return nil, err
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodDeleteObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodDeleteObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -381,27 +352,26 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (
}
func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return toStatusErr(err)
+ return err
}
-
- reqCtx, err := requestContext(stream.Context())
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
if err != nil {
- return toStatusErr(err)
+ return err
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodRangeObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodRangeObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -411,27 +381,26 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G
}
func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Method: nativeschema.MethodHashObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodHashObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
resp, err := c.next.GetRangeHash(ctx, request)
@@ -446,28 +415,27 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa
}
func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
- cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
+ md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
if err != nil {
return nil, err
}
-
- reqCtx, err := requestContext(ctx)
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqCtx.Namespace,
- Container: cnrID,
- Object: objID,
- Header: request.GetBody().GetObject().GetHeader(),
- Method: nativeschema.MethodPutObject,
- Role: nativeSchemaRole(reqCtx.Role),
- SenderKey: hex.EncodeToString(reqCtx.SenderKey),
- ContainerOwner: reqCtx.ContainerOwner,
- SoftAPECheck: reqCtx.SoftAPECheck,
- BearerToken: reqCtx.BearerToken,
- XHeaders: request.GetMetaHeader().GetXHeaders(),
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: request.GetBody().GetObject().GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
}
if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -477,18 +445,36 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ
return c.next.PutSingle(ctx, request)
}
-func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
- if cidV2 != nil {
- if err = cnrID.ReadFromV2(*cidV2); err != nil {
- return
- }
+type request interface {
+ GetMetaHeader() *session.RequestMetaHeader
+ GetVerificationHeader() *session.RequestVerificationHeader
+}
+
+func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
+ meta := request.GetMetaHeader()
+ for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
+ meta = origin
}
- if objV2 != nil {
- objID = new(oid.ID)
- if err = objID.ReadFromV2(*objV2); err != nil {
- return
- }
+ cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
+ if err != nil {
+ return
+ }
+ session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
+ if err != nil {
+ return
+ }
+ bearer, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return
+ }
+
+ md = Metadata{
+ Container: cnrID,
+ Object: objID,
+ VerificationHeader: request.GetVerificationHeader(),
+ SessionToken: session,
+ BearerToken: bearer,
}
return
}
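
The origin-walking loop in newMetadata above is the key detail: meta headers are wrapped once per hop, and the session token must be read from the innermost (original) header. Below is a minimal, self-contained sketch of that walk, using a simplified stand-in for session.RequestMetaHeader rather than the real SDK type:

```go
package main

import "fmt"

// metaHeader is a toy stand-in for session.RequestMetaHeader.
type metaHeader struct {
	origin *metaHeader
	token  string
}

func (m *metaHeader) GetOrigin() *metaHeader { return m.origin }

// originalMeta mirrors the loop in newMetadata: keep descending through
// Origin() until the innermost (original) header is reached.
func originalMeta(meta *metaHeader) *metaHeader {
	for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
		meta = origin
	}
	return meta
}

func main() {
	inner := &metaHeader{token: "original-session-token"}
	mid := &metaHeader{origin: inner}
	outer := &metaHeader{origin: mid}
	fmt.Println(originalMeta(outer).token) // original-session-token
}
```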
diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go
index 46e55360d..97dbfa658 100644
--- a/pkg/services/object/ape/types.go
+++ b/pkg/services/object/ape/types.go
@@ -7,3 +7,11 @@ import "context"
type Checker interface {
CheckAPE(context.Context, Prm) error
}
+
+// InnerRingFetcher is an interface that must provide
+// Inner Ring information.
+type InnerRingFetcher interface {
+ // InnerRingKeys must return the list of public keys
+ // of the current Inner Ring nodes.
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
+}
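
InnerRingKeys takes a context and returns raw public keys, so a static stub is enough to exercise any role-resolution logic that depends on it. A small sketch follows; the interface is re-declared locally so the snippet stands alone, and staticIR is a hypothetical test helper, not part of the repository:

```go
package main

import (
	"context"
	"fmt"
)

// InnerRingFetcher re-declares the interface from types.go for a
// self-contained example.
type InnerRingFetcher interface {
	InnerRingKeys(ctx context.Context) ([][]byte, error)
}

// staticIR returns a fixed key list, enough for unit tests that need an
// Inner Ring without a live network.
type staticIR struct {
	keys [][]byte
}

func (s staticIR) InnerRingKeys(context.Context) ([][]byte, error) {
	return s.keys, nil
}

func main() {
	var f InnerRingFetcher = staticIR{keys: [][]byte{{0x02, 0x03}}}
	keys, err := f.InnerRingKeys(context.Background())
	fmt.Println(len(keys), err) // 1 <nil>
}
```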
diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/ape/util.go
similarity index 56%
rename from pkg/services/object/acl/v2/util.go
rename to pkg/services/object/ape/util.go
index c5225e8c4..5cd2caa50 100644
--- a/pkg/services/object/acl/v2/util.go
+++ b/pkg/services/object/ape/util.go
@@ -1,4 +1,4 @@
-package v2
+package ape
import (
"crypto/ecdsa"
@@ -6,57 +6,34 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-var errMissingContainerID = errors.New("missing container ID")
-
-func getContainerIDFromRequest(req any) (cid.ID, error) {
- var idV2 *refsV2.ContainerID
- var id cid.ID
-
- switch v := req.(type) {
- case *objectV2.GetRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.PutRequest:
- part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
- if !ok {
- return cid.ID{}, errors.New("can't get container ID in chunk")
+func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
+ if cidV2 != nil {
+ if err = cnrID.ReadFromV2(*cidV2); err != nil {
+ return
}
-
- idV2 = part.GetHeader().GetContainerID()
- case *objectV2.HeadRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.SearchRequest:
- idV2 = v.GetBody().GetContainerID()
- case *objectV2.DeleteRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.GetRangeRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.GetRangeHashRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.PutSingleRequest:
- idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
- case *objectV2.PatchRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- default:
- return cid.ID{}, errors.New("unknown request type")
+ } else {
+ err = errMissingContainerID
+ return
}
- if idV2 == nil {
- return cid.ID{}, errMissingContainerID
+ if objV2 != nil {
+ objID = new(oid.ID)
+ if err = objID.ReadFromV2(*objV2); err != nil {
+ return
+ }
}
-
- return id, id.ReadFromV2(*idV2)
+ return
}
// originalBearerToken goes down to original request meta header and fetches
@@ -75,50 +52,6 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
return &tok, tok.ReadFromV2(*tokV2)
}
-// originalSessionToken goes down to original request meta header and fetches
-// session token from there.
-func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
- for header.GetOrigin() != nil {
- header = header.GetOrigin()
- }
-
- tokV2 := header.GetSessionToken()
- if tokV2 == nil {
- return nil, nil
- }
-
- var tok sessionSDK.Object
-
- err := tok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- return &tok, nil
-}
-
-// getObjectIDFromRequestBody decodes oid.ID from the common interface of the
-// object reference's holders. Returns an error if object ID is missing in the request.
-func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
- idV2 := body.GetAddress().GetObjectID()
- return getObjectIDFromRefObjectID(idV2)
-}
-
-func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) {
- if idV2 == nil {
- return nil, errors.New("missing object ID")
- }
-
- var id oid.ID
-
- err := id.ReadFromV2(*idV2)
- if err != nil {
- return nil, err
- }
-
- return &id, nil
-}
-
func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
// 1. First check signature of session token.
if !token.VerifySignature() {
@@ -172,16 +105,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-// assertVerb checks that token verb corresponds to op.
-func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
- switch op {
- case acl.OpObjectPut:
+// assertVerb checks that token verb corresponds to the method.
+func assertVerb(tok sessionSDK.Object, method string) bool {
+ switch method {
+ case nativeschema.MethodPutObject:
return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
- case acl.OpObjectDelete:
+ case nativeschema.MethodDeleteObject:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
- case acl.OpObjectGet:
+ case nativeschema.MethodGetObject:
return tok.AssertVerb(sessionSDK.VerbObjectGet)
- case acl.OpObjectHead:
+ case nativeschema.MethodHeadObject:
return tok.AssertVerb(
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
@@ -190,14 +123,15 @@ func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectPatch,
)
- case acl.OpObjectSearch:
+ case nativeschema.MethodSearchObject:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
- case acl.OpObjectRange:
+ case nativeschema.MethodRangeObject:
return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
- case acl.OpObjectHash:
+ case nativeschema.MethodHashObject:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
+ case nativeschema.MethodPatchObject:
+ return tok.AssertVerb(sessionSDK.VerbObjectPatch)
}
-
return false
}
@@ -221,3 +155,15 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error
return nil
}
+
+func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
+ key, err := unmarshalPublicKey(rawKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var idSender user.ID
+ user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
+
+ return &idSender, key, nil
+}
diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go
new file mode 100644
index 000000000..916bce427
--- /dev/null
+++ b/pkg/services/object/ape/util_test.go
@@ -0,0 +1,84 @@
+package ape
+
+import (
+ "slices"
+ "testing"
+
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsVerbCompatible(t *testing.T) {
+ table := map[string][]sessionSDK.ObjectVerb{
+ nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
+ nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
+ nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet},
+ nativeschema.MethodHeadObject: {
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectPatch,
+ },
+ nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
+ nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash},
+ nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
+ nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch},
+ }
+
+ verbs := []sessionSDK.ObjectVerb{
+ sessionSDK.VerbObjectPut,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectSearch,
+ sessionSDK.VerbObjectPatch,
+ }
+
+ var tok sessionSDK.Object
+
+ for op, list := range table {
+ for _, verb := range verbs {
+ contains := slices.Contains(list, verb)
+
+ tok.ForVerb(verb)
+
+ require.Equal(t, contains, assertVerb(tok, op),
+ "%v in token, %s executing", verb, op)
+ }
+ }
+}
+
+func TestAssertSessionRelation(t *testing.T) {
+ var tok sessionSDK.Object
+ cnr := cidtest.ID()
+ cnrOther := cidtest.ID()
+ obj := oidtest.ID()
+ objOther := oidtest.ID()
+
+ // make sure the IDs differ, otherwise the test won't work correctly
+ require.False(t, cnrOther.Equals(cnr))
+ require.False(t, objOther.Equals(obj))
+
+ // bind session to the container (required)
+ tok.BindContainer(cnr)
+
+ // test container-global session
+ require.NoError(t, assertSessionRelation(tok, cnr, nil))
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnrOther, nil))
+ require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
+
+ // limit the session to the particular object
+ tok.LimitByObjects(obj)
+
+ // test fixed object session (here obj arg must be non-nil everywhere)
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnr, &objOther))
+}
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index 39e1f9f2d..f8ee089fe 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -5,12 +5,12 @@ import (
"errors"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -37,7 +37,7 @@ func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return res, err
}
@@ -48,7 +48,7 @@ func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error
if !a.enabled.Load() {
return err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -59,7 +59,7 @@ func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRan
if !a.enabled.Load() {
return err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -70,7 +70,7 @@ func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHas
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
@@ -81,19 +81,19 @@ func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*obje
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
// Put implements ServiceServer.
-func (a *auditService) Put() (PutObjectStream, error) {
- res, err := a.next.Put()
+func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) {
+ res, err := a.next.Put(ctx)
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
return res, err
}
return &auditPutStream{
@@ -108,7 +108,7 @@ func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleReque
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(),
req.GetBody().GetObject().GetObjectID()),
err == nil)
@@ -121,7 +121,7 @@ func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) er
if !a.enabled.Load() {
return err
}
- audit.LogRequest(a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return err
}
@@ -145,7 +145,7 @@ func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse,
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -163,8 +163,8 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error
if err != nil {
a.failed = true
}
- if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
@@ -183,13 +183,13 @@ type auditPatchStream struct {
nonFirstSend bool
}
-func (a *auditService) Patch() (PatchObjectStream, error) {
- res, err := a.next.Patch()
+func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ res, err := a.next.Patch(ctx)
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
return res, err
}
return &auditPatchStream{
@@ -205,7 +205,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -224,8 +224,8 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e
if err != nil {
a.failed = true
}
- if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index f48cc5b3d..ef65e78bc 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -3,7 +3,7 @@ package object
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
@@ -40,20 +40,20 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
return x.nextHandler.Get(req, stream)
}
-func (x *Common) Put() (PutObjectStream, error) {
+func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Put()
+ return x.nextHandler.Put(ctx)
}
-func (x *Common) Patch() (PatchObjectStream, error) {
+func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Patch()
+ return x.nextHandler.Patch(ctx)
}
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index 980c4c6bd..f2bd907db 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -1,6 +1,7 @@
package target
import (
+ "context"
"errors"
"fmt"
@@ -13,20 +14,20 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-func New(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
// prepare needed put parameters
- if err := preparePrm(prm); err != nil {
+ if err := preparePrm(ctx, &prm); err != nil {
return nil, fmt.Errorf("could not prepare put parameters: %w", err)
}
if prm.Header.Signature() != nil {
- return newUntrustedTarget(prm)
+ return newUntrustedTarget(ctx, &prm)
}
- return newTrustedTarget(prm)
+ return newTrustedTarget(ctx, &prm)
}
-func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -48,8 +49,9 @@ func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWrit
}, nil
}
-func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ prm.Relay = nil // do not relay requests without a signature
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -85,12 +87,10 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter
user.IDFromKey(&ownerSession, key.PublicKey)
if !ownerObj.Equals(ownerSession) {
- return nil, errors.New("session token is missing but object owner id is different from the default key")
- }
- } else {
- if !ownerObj.Equals(sessionInfo.Owner) {
- return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
+ return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
}
+ } else if !ownerObj.Equals(sessionInfo.Owner) {
+ return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
}
if prm.SignRequestPrivateKey == nil {
@@ -110,11 +110,11 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter
}, nil
}
-func preparePrm(prm *objectwriter.Params) error {
+func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
var err error
// get latest network map
- nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
+ nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource)
if err != nil {
return fmt.Errorf("could not get latest network map: %w", err)
}
@@ -125,7 +125,7 @@ func preparePrm(prm *objectwriter.Params) error {
}
// get container to store the object
- cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
+ cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr)
if err != nil {
return fmt.Errorf("could not get container by ID: %w", err)
}
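
The trusted-target branch above encodes one invariant: with no session token, the object owner must match the owner derived from the node's default key; with a token, it must match the token issuer. A toy sketch of that rule, using plain strings as stand-ins for user.ID:

```go
package main

import (
	"errors"
	"fmt"
)

// checkOwner mirrors the rule in newTrustedTarget. IDs are plain strings
// standing in for user.ID; tokenIssuer == nil models a missing session token.
func checkOwner(ownerObj, ownerDefaultKey string, tokenIssuer *string) error {
	if tokenIssuer == nil {
		if ownerObj != ownerDefaultKey {
			return fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerDefaultKey)
		}
		return nil
	}
	if ownerObj != *tokenIssuer {
		return errors.New("different token issuer and object owner identifiers")
	}
	return nil
}

func main() {
	issuer := "alice"
	fmt.Println(checkOwner("alice", "node-owner", &issuer)) // <nil>
	fmt.Println(checkOwner("alice", "bob", nil))            // error
}
```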
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
index 6689557ee..6593d3ca0 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
}
func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
- traverser, err := placement.NewTraverser(n.Traversal.Opts...)
+ traverser, err := placement.NewTraverser(ctx, n.Opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
@@ -56,10 +56,10 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
}
// perform additional container broadcast if needed
- if n.Traversal.submitPrimaryPlacementFinish() {
+ if n.submitPrimaryPlacementFinish() {
err := n.ForEachNode(ctx, f)
if err != nil {
- n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -79,33 +79,29 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
continue
}
- workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
+ isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey())
item := new(bool)
wg.Add(1)
- if err := workerPool.Submit(func() {
+ go func() {
defer wg.Done()
err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
if err != nil {
resErr.Store(err)
- svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err)
+ svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
- }); err != nil {
- wg.Done()
- svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err)
- return true
- }
+ }()
// Mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
- n.Traversal.submitProcessed(addr, item)
+ n.submitProcessed(addr, item)
}
wg.Wait()
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
index f62934bed..fff58aca7 100644
--- a/pkg/services/object/common/writer/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -28,7 +28,7 @@ type distributedWriter struct {
resetSuccessAfterOnBroadcast bool
}
-// parameters and state of container Traversal.
+// Traversal holds the placement traversal parameters and state for a container.
type Traversal struct {
Opts []placement.Option
@@ -95,6 +95,10 @@ func (x errIncompletePut) Error() string {
return commonMsg
}
+func (x errIncompletePut) Unwrap() error {
+ return x.singleErr
+}
+
// WriteObject implements the transformer.ObjectWriter interface.
func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
t.obj = obj
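
Adding Unwrap means errIncompletePut now participates in errors.Is/errors.As chains, so callers can match the underlying cause instead of parsing the message. A self-contained sketch (errNoSpace is a hypothetical cause, not a repository sentinel):

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSpace = errors.New("no space left") // hypothetical underlying cause

type errIncompletePut struct {
	singleErr error
}

func (x errIncompletePut) Error() string {
	msg := "incomplete object PUT by placement"
	if x.singleErr != nil {
		msg = fmt.Sprintf("%s: %v", msg, x.singleErr)
	}
	return msg
}

// Unwrap exposes the wrapped cause to errors.Is/errors.As.
func (x errIncompletePut) Unwrap() error { return x.singleErr }

func main() {
	var err error = errIncompletePut{singleErr: errNoSpace}
	fmt.Println(errors.Is(err, errNoSpace)) // true, thanks to Unwrap
}
```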
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index fb0a8e4e5..26a53e315 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -14,6 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -25,7 +26,10 @@ import (
var _ transformer.ObjectWriter = (*ECWriter)(nil)
-var errUnsupportedECObject = errors.New("object is not supported for erasure coding")
+var (
+ errUnsupportedECObject = errors.New("object is not supported for erasure coding")
+ errFailedToSaveAllECParts = errors.New("failed to save all EC parts")
+)
type ECWriter struct {
Config *Config
@@ -37,10 +41,12 @@ type ECWriter struct {
ObjectMeta object.ContentMeta
ObjectMetaValid bool
+
+ remoteRequestSignKey *ecdsa.PrivateKey
}
func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
- relayed, err := e.relayIfNotContainerNode(ctx, obj)
+ relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj)
if err != nil {
return err
}
@@ -60,23 +66,35 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
e.ObjectMetaValid = true
}
+ if isContainerNode {
+ restoreTokens := e.CommonPrm.ForgetTokens()
+ defer restoreTokens()
+ // The request is executed on a container node, so sign it with the container key.
+ e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil)
+ if err != nil {
+ return err
+ }
+ } else {
+ e.remoteRequestSignKey = e.Key
+ }
+
if obj.ECHeader() != nil {
return e.writeECPart(ctx, obj)
}
return e.writeRawObject(ctx, obj)
}
-func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
- if e.Relay == nil {
- return false, nil
- }
- currentNodeIsContainerNode, err := e.currentNodeIsContainerNode()
+func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) {
+ currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx)
if err != nil {
- return false, err
+ return false, false, err
}
if currentNodeIsContainerNode {
// object can be split or saved locally
- return false, nil
+ return false, true, nil
+ }
+ if e.Relay == nil {
+ return false, currentNodeIsContainerNode, nil
}
objID := object.AddressOf(obj).Object()
var index uint32
@@ -85,13 +103,13 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O
index = obj.ECHeader().Index()
}
if err := e.relayToContainerNode(ctx, objID, index); err != nil {
- return false, err
+ return false, false, err
}
- return true, nil
+ return true, currentNodeIsContainerNode, nil
}
-func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
- t, err := placement.NewTraverser(e.PlacementOpts...)
+func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) {
+ t, err := placement.NewTraverser(ctx, e.PlacementOpts...)
if err != nil {
return false, err
}
@@ -110,7 +128,7 @@ func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
}
func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
- t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -131,21 +149,11 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
- completed := make(chan interface{})
- if poolErr := e.Config.RemotePool.Submit(func() {
- defer close(completed)
- err = e.Relay(ctx, info, c)
- }); poolErr != nil {
- close(completed)
- svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr)
- return poolErr
- }
- <-completed
-
+ err = e.Relay(ctx, info, c)
if err == nil {
return nil
}
- e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
lastErr = err
}
}
@@ -162,7 +170,7 @@ func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error
return e.writePartLocal(ctx, obj)
}
- t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
if err != nil {
return err
}
@@ -197,14 +205,15 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
if err != nil {
return err
}
+ partsProcessed := make([]atomic.Bool, len(parts))
objID, _ := obj.ID()
- t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
- eg, egCtx := errgroup.WithContext(ctx)
for {
+ eg, egCtx := errgroup.WithContext(ctx)
nodes := t.Next()
if len(nodes) == 0 {
break
@@ -216,17 +225,31 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
for idx := range parts {
- eg.Go(func() error {
- return e.writePart(egCtx, parts[idx], idx, nodes, visited)
- })
- t.SubmitSuccess()
+ if !partsProcessed[idx].Load() {
+ eg.Go(func() error {
+ err := e.writePart(egCtx, parts[idx], idx, nodes, visited)
+ if err == nil {
+ partsProcessed[idx].Store(true)
+ t.SubmitSuccess()
+ }
+ return err
+ })
+ }
}
+ err = eg.Wait()
}
- if err := eg.Wait(); err != nil {
+ if err != nil {
return errIncompletePut{
singleErr: err,
}
}
+ for idx := range partsProcessed {
+ if !partsProcessed[idx].Load() {
+ return errIncompletePut{
+ singleErr: errFailedToSaveAllECParts,
+ }
+ }
+ }
return nil
}
@@ -242,8 +265,10 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
err := e.putECPartToNode(ctx, obj, node)
if err == nil {
return nil
+ } else if clientSDK.IsErrObjectAlreadyRemoved(err) {
+ return err
}
- e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
@@ -267,7 +292,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -276,7 +301,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
}
// try to save to any node not visited by current part
- for i := range len(nodes) {
+ for i := range nodes {
select {
case <-ctx.Done():
return ctx.Err()
@@ -291,7 +316,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -308,20 +333,11 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n
}
func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
- var err error
localTarget := LocalTarget{
- Storage: e.Config.LocalStore,
+ Storage: e.Config.LocalStore,
+ Container: e.Container,
}
- completed := make(chan interface{})
- if poolErr := e.Config.LocalPool.Submit(func() {
- defer close(completed)
- err = localTarget.WriteObject(ctx, obj, e.ObjectMeta)
- }); poolErr != nil {
- close(completed)
- return poolErr
- }
- <-completed
- return err
+ return localTarget.WriteObject(ctx, obj, e.ObjectMeta)
}
func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
@@ -329,21 +345,11 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n
client.NodeInfoFromNetmapElement(&clientNodeInfo, node)
remoteTaget := remoteWriter{
- privateKey: e.Key,
+ privateKey: e.remoteRequestSignKey,
clientConstructor: e.Config.ClientConstructor,
commonPrm: e.CommonPrm,
nodeInfo: clientNodeInfo,
}
- var err error
- completed := make(chan interface{})
- if poolErr := e.Config.RemotePool.Submit(func() {
- defer close(completed)
- err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
- }); poolErr != nil {
- close(completed)
- return poolErr
- }
- <-completed
- return err
+ return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
}
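
The reworked writeRawObject creates a fresh errgroup per placement batch and tracks finished parts with atomic.Bool flags, so a later batch retries only parts that no node has stored yet, and a final pass converts any still-missing part into errIncompletePut. A compact sketch of that control flow under simplified assumptions (savePart and the integer node batches stand in for the traverser and the real network writes):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

// savePart stands in for putECPartToNode; node 0 cannot store part 1.
func savePart(_ context.Context, part, node int) error {
	if node == 0 && part == 1 {
		return errors.New("node unavailable")
	}
	return nil
}

// writeParts mirrors the batch/retry shape of writeRawObject: a new errgroup
// per batch, atomic flags marking stored parts, and a completeness check.
func writeParts(ctx context.Context, parts int, batches []int) error {
	processed := make([]atomic.Bool, parts)
	var err error
	for _, node := range batches {
		eg, egCtx := errgroup.WithContext(ctx)
		for idx := range parts {
			if processed[idx].Load() {
				continue // already stored by an earlier batch
			}
			eg.Go(func() error {
				if e := savePart(egCtx, idx, node); e != nil {
					return e
				}
				processed[idx].Store(true)
				return nil
			})
		}
		err = eg.Wait() // only the last batch's error survives, as in the diff
	}
	if err != nil {
		return err
	}
	for i := range processed {
		if !processed[i].Load() {
			return errors.New("failed to save all EC parts")
		}
	}
	return nil
}

func main() {
	// First batch (node 0) fails for part 1; second batch (node 1) retries it.
	fmt.Println(writeParts(context.Background(), 2, []int{0, 1})) // <nil>
}
```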
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
new file mode 100644
index 000000000..d5eeddf21
--- /dev/null
+++ b/pkg/services/object/common/writer/ec_test.go
@@ -0,0 +1,190 @@
+package writer
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "slices"
+ "strconv"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+type testPlacementBuilder struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
+ [][]netmap.NodeInfo, error,
+) {
+ arr := slices.Clone(p.vectors[0])
+ return [][]netmap.NodeInfo{arr}, nil
+}
+
+type nmKeys struct{}
+
+func (nmKeys) IsLocalKey(_ []byte) bool {
+ return false
+}
+
+type clientConstructor struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) {
+ if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) ||
+ bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) {
+ return multiAddressClient{err: errors.New("node unavailable")}, nil
+ }
+ return multiAddressClient{}, nil
+}
+
+type multiAddressClient struct {
+ client.MultiAddressClient
+ err error
+}
+
+func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) {
+ if c.err != nil {
+ return nil, c.err
+ }
+ return &apiclient.ResObjectPutSingle{}, nil
+}
+
+func (c multiAddressClient) ReportError(error) {
+}
+
+func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error {
+ return nil
+}
+
+func TestECWriter(t *testing.T) {
+ // Create container with policy EC 1.1
+ cnr := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetECDataCount(1)
+ x1.SetECParityCount(1)
+ p1.AddReplicas(x1)
+ cnr.SetPlacementPolicy(p1)
+ cnr.SetAttribute("cnr", "cnr1")
+
+ cid := cidtest.ID()
+
+ // Create 4 nodes: 2 nodes for chunks and
+ // 2 spare nodes for the case when the first two fail.
+ ns, _ := testNodeMatrix(t, []int{4})
+
+ data := make([]byte, 100)
+ _, _ = rand.Read(data)
+ ver := version.Current()
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(data))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cid)
+ obj.SetVersion(&ver)
+ obj.SetPayload(data)
+ obj.SetPayloadSize(uint64(len(data)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ // Builder returns nodes without sorting by HRW
+ builder := &testPlacementBuilder{
+ vectors: ns,
+ }
+
+ ownerKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ nodeKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ log, err := logger.NewLogger(logger.Prm{})
+ require.NoError(t, err)
+
+ var n nmKeys
+ ecw := ECWriter{
+ Config: &Config{
+ NetmapKeys: n,
+ Logger: log,
+ ClientConstructor: clientConstructor{vectors: ns},
+ KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
+ },
+ PlacementOpts: append(
+ []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},
+ placement.WithCopyNumbers(nil)), // copy numbers are ignored for EC
+ Container: cnr,
+ Key: &ownerKey.PrivateKey,
+ Relay: nil,
+ ObjectMetaValid: true,
+ }
+
+ err = ecw.WriteObject(context.Background(), obj)
+ require.NoError(t, err)
+}
+
+func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
+ mNodes := make([][]netmap.NodeInfo, len(dim))
+ mAddr := make([][]string, len(dim))
+
+ for i := range dim {
+ ns := make([]netmap.NodeInfo, dim[i])
+ as := make([]string, dim[i])
+
+ for j := range dim[i] {
+ a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
+ strconv.Itoa(i),
+ strconv.Itoa(60000+j),
+ )
+
+ var ni netmap.NodeInfo
+ ni.SetNetworkEndpoints(a)
+ ni.SetPublicKey([]byte(a))
+
+ var na network.AddressGroup
+
+ err := na.FromIterator(netmapcore.Node(ni))
+ require.NoError(t, err)
+
+ as[j] = network.StringifyGroup(na)
+
+ ns[j] = ni
+ }
+
+ mNodes[i] = ns
+ mAddr[i] = as
+ }
+
+ return mNodes, mAddr
+}
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
index 02fd25b9e..cf3d03275 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -4,7 +4,9 @@ import (
"context"
"fmt"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -13,7 +15,7 @@ import (
type ObjectStorage interface {
// Put must save passed object
// and return any appeared error.
- Put(context.Context, *objectSDK.Object) error
+ Put(context.Context, *objectSDK.Object, bool) error
// Delete must delete passed objects
// and return any appeared error.
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
@@ -25,10 +27,15 @@ type ObjectStorage interface {
}
type LocalTarget struct {
- Storage ObjectStorage
+ Storage ObjectStorage
+ Container containerSDK.Container
}
func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
+ if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
+ return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
+ }
+
switch meta.Type() {
case objectSDK.TypeTombstone:
err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
@@ -44,8 +51,5 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
- if err := t.Storage.Put(ctx, obj); err != nil {
- return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
- }
return nil
}
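The reordered `LocalTarget.WriteObject` above persists the object before its meta type is interpreted, and `Put` now carries an indexed-container flag. A minimal sketch of that contract, using simplified stand-in types rather than the real frostfs-node interfaces:

```go
package sketch

import (
	"context"
	"fmt"
)

type object struct{ isTombstone bool }

// storage mirrors the updated ObjectStorage contract: Put now takes a
// flag telling the engine whether the container maintains extra indexes.
type storage interface {
	Put(ctx context.Context, obj *object, indexedContainer bool) error
	Delete(ctx context.Context, obj *object) error
}

// writeObject follows the patched order: persist first, then apply meta
// side effects such as tombstone processing.
func writeObject(ctx context.Context, s storage, obj *object, indexed bool) error {
	if err := s.Put(ctx, obj, indexed); err != nil {
		return fmt.Errorf("could not put object to local storage: %w", err)
	}
	if obj.isTombstone {
		return s.Delete(ctx, obj)
	}
	return nil
}
```

Storing before the tombstone is applied means a failed Put no longer leaves meta storage updated for an object that was never written.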
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
index 3d50da988..d3d2b41b4 100644
--- a/pkg/services/object/common/writer/writer.go
+++ b/pkg/services/object/common/writer/writer.go
@@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -24,7 +23,7 @@ type MaxSizeSource interface {
// of physically stored object in system.
//
// Must return 0 if value can not be obtained.
- MaxObjectSize() uint64
+ MaxObjectSize(context.Context) uint64
}
type ClientConstructor interface {
@@ -32,7 +31,7 @@ type ClientConstructor interface {
}
type InnerRing interface {
- InnerRingKeys() ([][]byte, error)
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
}
type FormatValidatorConfig interface {
@@ -52,8 +51,6 @@ type Config struct {
NetmapSource netmap.Source
- RemotePool, LocalPool util.WorkerPool
-
NetmapKeys netmap.AnnouncedKeys
FormatValidator *object.FormatValidator
@@ -69,12 +66,6 @@ type Config struct {
type Option func(*Config)
-func WithWorkerPools(remote, local util.WorkerPool) Option {
- return func(c *Config) {
- c.RemotePool, c.LocalPool = remote, local
- }
-}
-
func WithLogger(l *logger.Logger) Option {
return func(c *Config) {
c.Logger = l
@@ -87,13 +78,6 @@ func WithVerifySessionTokenIssuer(v bool) Option {
}
}
-func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) {
- if c.NetmapKeys.IsLocalKey(pub) {
- return c.LocalPool, true
- }
- return c.RemotePool, false
-}
-
type Params struct {
Config *Config
@@ -150,7 +134,8 @@ func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.Object
nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
if node.Local {
return LocalTarget{
- Storage: prm.Config.LocalStore,
+ Storage: prm.Config.LocalStore,
+ Container: prm.Container,
}
}
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 88454625d..57e33fde7 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(logs.ServingRequest)
+ exec.log.Debug(ctx, logs.ServingRequest)
if err := exec.executeLocal(ctx); err != nil {
- exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
return err
}
- exec.log.Debug(logs.OperationFinishedSuccessfully)
+ exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
return nil
}
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index c2f92950f..a99ba3586 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -4,12 +4,13 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"strconv"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -34,13 +35,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = &logger.Logger{Logger: l.With(
+ exec.log = l.With(
zap.String("request", "DELETE"),
zap.Stringer("address", exec.address()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )}
+ )
}
func (exec *execCtx) isLocal() bool {
@@ -83,16 +84,16 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
exec.splitInfo = errSplitInfo.SplitInfo()
exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
- exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
+ exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
if err := exec.collectMembers(ctx); err != nil {
return err
}
- exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
+ exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected)
return nil
case errors.As(err, &errECInfo):
- exec.log.Debug(logs.DeleteECObjectReceived)
+ exec.log.Debug(ctx, logs.DeleteECObjectReceived)
return nil
}
@@ -108,7 +109,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
func (exec *execCtx) collectMembers(ctx context.Context) error {
if exec.splitInfo == nil {
- exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
+ exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY)
return nil
}
@@ -131,7 +132,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error {
func (exec *execCtx) collectChain(ctx context.Context) error {
var chain []oid.ID
- exec.log.Debug(logs.DeleteAssemblingChain)
+ exec.log.Debug(ctx, logs.DeleteAssemblingChain)
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
@@ -152,7 +153,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error {
}
func (exec *execCtx) collectChildren(ctx context.Context) error {
- exec.log.Debug(logs.DeleteCollectingChildren)
+ exec.log.Debug(ctx, logs.DeleteCollectingChildren)
children, err := exec.svc.header.children(ctx, exec)
if err != nil {
@@ -165,7 +166,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error {
}
func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
- exec.log.Debug(logs.DeleteSupplementBySplitID)
+ exec.log.Debug(ctx, logs.DeleteSupplementBySplitID)
chain, err := exec.svc.searcher.splitMembers(ctx, exec)
if err != nil {
@@ -182,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
for i := range members {
for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
if members[i].Equals(incoming[j]) {
- incoming = append(incoming[:j], incoming[j+1:]...)
+ incoming = slices.Delete(incoming, j, j+1)
j--
}
}
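`slices.Delete` replaces the manual `append(incoming[:j], incoming[j+1:]...)` idiom in `addMembers` above. A small self-contained sketch of the same deduplication loop over ints (a hypothetical helper, not the real method):

```go
package sketch

import "slices"

// removeKnown deletes every element of incoming that already occurs in
// members. The inner loop indexes manually because the slice shrinks in
// the body; j is decremented to revisit the element shifted into slot j.
func removeKnown(members, incoming []int) []int {
	for i := range members {
		for j := 0; j < len(incoming); j++ {
			if members[i] == incoming[j] {
				incoming = slices.Delete(incoming, j, j+1)
				j--
			}
		}
	}
	return incoming
}
```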
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 2c3c47f49..01b2d9b3f 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -10,13 +10,13 @@ import (
)
func (exec *execCtx) executeLocal(ctx context.Context) error {
- exec.log.Debug(logs.DeleteFormingTombstoneStructure)
+ exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure)
if err := exec.formTombstone(ctx); err != nil {
return err
}
- exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
+ exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
return exec.saveTombstone(ctx)
}
@@ -33,7 +33,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) error {
)
exec.addMembers([]oid.ID{exec.address().Object()})
- exec.log.Debug(logs.DeleteFormingSplitInfo)
+ exec.log.Debug(ctx, logs.DeleteFormingSplitInfo)
if err := exec.formExtendedInfo(ctx); err != nil {
return fmt.Errorf("form extended info: %w", err)
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index 0ba21eee3..1c4d7d585 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -27,11 +27,11 @@ type Option func(*cfg)
type NetworkInfo interface {
netmap.State
- // Must return the lifespan of the tombstones
+ // TombstoneLifetime must return the lifespan of the tombstones
// in the FrostFS epochs.
TombstoneLifetime() (uint64, error)
- // Returns user ID of the local storage node. Result must not be nil.
+	// LocalNodeID returns the user ID of the local storage node. Result must not be nil.
// New tombstone objects will have the result as an owner ID if removal is executed w/o a session.
LocalNodeID() user.ID
}
@@ -72,7 +72,7 @@ func New(gs *getsvc.Service,
opts ...Option,
) *Service {
c := &cfg{
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
header: &headSvcWrapper{s: gs},
searcher: &searchSvcWrapper{s: ss},
placer: &putSvcWrapper{s: ps},
@@ -92,6 +92,6 @@ func New(gs *getsvc.Service,
// WithLogger returns option to specify Delete service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))}
+ c.log = l
}
}
diff --git a/pkg/services/object/delete/v2/service.go b/pkg/services/object/delete/v2/service.go
index 10dcd0e87..7146f0361 100644
--- a/pkg/services/object/delete/v2/service.go
+++ b/pkg/services/object/delete/v2/service.go
@@ -3,8 +3,8 @@ package deletesvc
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Delete operation of Object service v2.
diff --git a/pkg/services/object/delete/v2/util.go b/pkg/services/object/delete/v2/util.go
index d0db1f543..c57d4562a 100644
--- a/pkg/services/object/delete/v2/util.go
+++ b/pkg/services/object/delete/v2/util.go
@@ -4,10 +4,10 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index 9f17f1e4c..e80132489 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -13,7 +13,7 @@ import (
func (r *request) assemble(ctx context.Context) {
if !r.canAssembleComplexObject() {
- r.log.Debug(logs.GetCanNotAssembleTheObject)
+ r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
return
}
@@ -35,23 +35,23 @@ func (r *request) assemble(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(logs.GetTryingToAssembleTheObject)
+ r.log.Debug(ctx, logs.GetTryingToAssembleTheObject)
r.prm.common = r.prm.common.WithLocalOnly(false)
assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
- r.log.Debug(logs.GetAssemblingSplittedObject,
+ r.log.Debug(ctx, logs.GetAssemblingSplittedObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted,
+ defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil {
- r.log.Warn(logs.GetFailedToAssembleSplittedObject,
+ r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
@@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
detachedExecutor.execute(ctx)
- return detachedExecutor.statusError.err
+ return detachedExecutor.err
}
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index a58602bf7..59dd7fd93 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -12,7 +12,7 @@ import (
func (r *request) assembleEC(ctx context.Context) {
if r.isRaw() {
- r.log.Debug(logs.GetCanNotAssembleTheObject)
+ r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
return
}
@@ -34,29 +34,29 @@ func (r *request) assembleEC(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(logs.GetTryingToAssembleTheECObject)
+ r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
// initialize epoch number
- ok := r.initEpoch()
+ ok := r.initEpoch(ctx)
if !ok {
return
}
r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
+ assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
- r.log.Debug(logs.GetAssemblingECObject,
+ r.log.Debug(ctx, logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(logs.GetAssemblingECObjectCompleted,
+ defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) {
- r.log.Warn(logs.GetFailedToAssembleECObject,
+ r.log.Warn(ctx, logs.GetFailedToAssembleECObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go
index ff3f90bf2..b24c9417b 100644
--- a/pkg/services/object/get/assembler.go
+++ b/pkg/services/object/get/assembler.go
@@ -2,6 +2,7 @@ package getsvc
import (
"context"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -59,53 +60,24 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS
if previousID == nil && len(childrenIDs) == 0 {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
}
+
if len(childrenIDs) > 0 {
- if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil {
- return nil, err
+ if a.rng != nil {
+ err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer)
+ } else {
+ err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer)
}
} else {
- if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil {
- return nil, err
+ if a.rng != nil {
+ err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer)
+ } else {
+ err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer)
}
}
- return a.parentObject, nil
-}
-
-func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- var sourceObjectIDs []oid.ID
- sourceObjectID, ok := a.splitInfo.Link()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- sourceObjectID, ok = a.splitInfo.LastPart()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- if len(sourceObjectIDs) == 0 {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- for _, sourceObjectID = range sourceObjectIDs {
- obj, err := a.getParent(ctx, sourceObjectID, writer)
- if err == nil {
- return obj, nil
- }
- }
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
-}
-
-func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
if err != nil {
return nil, err
}
- parent := obj.Parent()
- if parent == nil {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- if err := writer.WriteHeader(ctx, parent); err != nil {
- return nil, err
- }
- return obj, nil
+ return a.parentObject, nil
}
func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) {
@@ -190,26 +162,16 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD
}
func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if a.rng == nil {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
- }
- return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true)
- }
-
- if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
return err
}
- return writer.WriteChunk(ctx, a.parentObject.Payload())
+ return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true)
}
func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if a.rng == nil {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
- }
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
}
-
if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil {
return err
}
@@ -219,16 +181,9 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev
return nil
}
-func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error {
- withRng := len(partRanges) > 0 && a.rng != nil
-
+func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error {
for i := range partIDs {
- var r *objectSDK.Range
- if withRng {
- r = &partRanges[i]
- }
-
- _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer)
+ _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer)
if err != nil {
return err
}
@@ -237,22 +192,13 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec
}
func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, rngs, err := a.buildChain(ctx, prevID)
+ chain, err := a.buildChain(ctx, prevID)
if err != nil {
return err
}
- reverseRngs := len(rngs) > 0
-
- for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 {
- chain[left], chain[right] = chain[right], chain[left]
-
- if reverseRngs {
- rngs[left], rngs[right] = rngs[right], rngs[left]
- }
- }
-
- return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false)
+ slices.Reverse(chain)
+ return a.assemblePayloadByObjectIDs(ctx, writer, chain, false)
}
func (a *assembler) isChild(obj *objectSDK.Object) bool {
@@ -260,63 +206,28 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool {
return parent == nil || equalAddresses(a.addr, object.AddressOf(parent))
}
-func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
+func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) {
var (
chain []oid.ID
- rngs []objectSDK.Range
- from = a.rng.GetOffset()
- to = from + a.rng.GetLength()
hasPrev = true
)
// fill the chain end-to-start
for hasPrev {
- // check that only for "range" requests,
- // for `GET` it stops via the false `withPrev`
- if a.rng != nil && a.currentOffset <= from {
- break
- }
-
head, err := a.objGetter.HeadObject(ctx, prevID)
if err != nil {
- return nil, nil, err
+ return nil, err
}
if !a.isChild(head) {
- return nil, nil, errParentAddressDiffers
+ return nil, errParentAddressDiffers
}
- if a.rng != nil {
- sz := head.PayloadSize()
-
- a.currentOffset -= sz
-
- if a.currentOffset < to {
- off := uint64(0)
- if from > a.currentOffset {
- off = from - a.currentOffset
- sz -= from - a.currentOffset
- }
-
- if to < a.currentOffset+off+sz {
- sz = to - off - a.currentOffset
- }
-
- index := len(rngs)
- rngs = append(rngs, objectSDK.Range{})
- rngs[index].SetOffset(off)
- rngs[index].SetLength(sz)
-
- id, _ := head.ID()
- chain = append(chain, id)
- }
- } else {
- id, _ := head.ID()
- chain = append(chain, id)
- }
+ id, _ := head.ID()
+ chain = append(chain, id)
prevID, hasPrev = head.PreviousID()
}
- return chain, rngs, nil
+ return chain, nil
}
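After this refactor, `Assemble` picks one of four strategies up front instead of letting each helper branch on `a.rng`. A condensed sketch of the dispatch shape (method names taken from the diff, everything else simplified):

```go
package sketch

type rangeSpec struct{ off, ln uint64 }

// pickStrategy mirrors the new Assemble control flow: the presence of a
// range and of a children list fully determine the assembly path.
func pickStrategy(rng *rangeSpec, hasChildren bool) string {
	switch {
	case hasChildren && rng != nil:
		return "assembleObjectByChildrenListRange"
	case hasChildren:
		return "assembleObjectByChildrenList"
	case rng != nil:
		return "assemleObjectByPreviousIDInReverseRange"
	default:
		return "assemleObjectByPreviousIDInReverse"
	}
}
```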
diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go
new file mode 100644
index 000000000..ff213cb82
--- /dev/null
+++ b/pkg/services/object/get/assembler_head.go
@@ -0,0 +1,45 @@
+package getsvc
+
+import (
+ "context"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ var sourceObjectIDs []oid.ID
+ sourceObjectID, ok := a.splitInfo.Link()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ sourceObjectID, ok = a.splitInfo.LastPart()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ if len(sourceObjectIDs) == 0 {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ for _, sourceObjectID = range sourceObjectIDs {
+ obj, err := a.getParent(ctx, sourceObjectID, writer)
+ if err == nil {
+ return obj, nil
+ }
+ }
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+}
+
+func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
+ if err != nil {
+ return nil, err
+ }
+ parent := obj.Parent()
+ if parent == nil {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ if err := writer.WriteHeader(ctx, parent); err != nil {
+ return nil, err
+ }
+ return obj, nil
+}
diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go
new file mode 100644
index 000000000..780693c40
--- /dev/null
+++ b/pkg/services/object/get/assembler_range.go
@@ -0,0 +1,87 @@
+package getsvc
+
+import (
+ "context"
+ "slices"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
+ if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
+ return err
+ }
+ return writer.WriteChunk(ctx, a.parentObject.Payload())
+}
+
+func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
+ if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil {
+ return err
+ }
+ if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part
+ return err
+ }
+ return nil
+}
+
+func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error {
+ for i := range partIDs {
+ _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
+ chain, rngs, err := a.buildChainRange(ctx, prevID)
+ if err != nil {
+ return err
+ }
+
+ slices.Reverse(chain)
+ slices.Reverse(rngs)
+ return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs)
+}
+
+func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
+ var (
+ chain []oid.ID
+ rngs []objectSDK.Range
+ from = a.rng.GetOffset()
+ to = from + a.rng.GetLength()
+
+ hasPrev = true
+ )
+
+ // fill the chain end-to-start
+ for hasPrev && from < a.currentOffset {
+ head, err := a.objGetter.HeadObject(ctx, prevID)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !a.isChild(head) {
+ return nil, nil, errParentAddressDiffers
+ }
+
+ nextOffset := a.currentOffset - head.PayloadSize()
+ clampedFrom := max(from, nextOffset)
+ clampedTo := min(to, a.currentOffset)
+ if clampedFrom < clampedTo {
+ index := len(rngs)
+ rngs = append(rngs, objectSDK.Range{})
+ rngs[index].SetOffset(clampedFrom - nextOffset)
+ rngs[index].SetLength(clampedTo - clampedFrom)
+
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
+
+ a.currentOffset = nextOffset
+ prevID, hasPrev = head.PreviousID()
+ }
+
+ return chain, rngs, nil
+}
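The window arithmetic in `buildChainRange` is the heart of this new file: walking parts from the end, each part spans `[nextOffset, currentOffset)` of the parent payload, and the requested `[from, to)` window is intersected with that span. A stand-alone sketch of the same clamping over plain part sizes (hypothetical helper):

```go
package sketch

type partRange struct{ off, ln uint64 }

// rangesFor reproduces the clamping in buildChainRange over plain part
// sizes. Parts are listed start-to-end; the walk goes end-to-start, so
// the result is collected in reverse, exactly like the chain that the
// caller later flips with slices.Reverse.
func rangesFor(partSizes []uint64, from, to uint64) []partRange {
	var current uint64
	for _, sz := range partSizes {
		current += sz // parent payload size is the initial currentOffset
	}

	var out []partRange
	for i := len(partSizes) - 1; i >= 0 && from < current; i-- {
		next := current - partSizes[i] // this part covers [next, current)
		clampedFrom := max(from, next)
		clampedTo := min(to, current)
		if clampedFrom < clampedTo {
			out = append(out, partRange{off: clampedFrom - next, ln: clampedTo - clampedFrom})
		}
		current = next
	}
	return out
}
```

For part sizes {4, 4, 4} and the window [5, 9), this yields offset 0, length 1 for the last part and offset 1, length 3 for the middle one; the loop stops before the first part because `from` is no longer below `current`.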
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index dde0d7dad..e0a7e1da6 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -34,7 +34,6 @@ type assemblerec struct {
rng *objectSDK.Range
remoteStorage ecRemoteStorage
localStorage localStorage
- cs container.Source
log *logger.Logger
head bool
traverserGenerator traverserGenerator
@@ -47,7 +46,6 @@ func newAssemblerEC(
rng *objectSDK.Range,
remoteStorage ecRemoteStorage,
localStorage localStorage,
- cs container.Source,
log *logger.Logger,
head bool,
tg traverserGenerator,
@@ -59,7 +57,6 @@ func newAssemblerEC(
ecInfo: ecInfo,
remoteStorage: remoteStorage,
localStorage: localStorage,
- cs: cs,
log: log,
head: head,
traverserGenerator: tg,
@@ -128,7 +125,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter
func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) {
objID := a.addr.Object()
- trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch)
+ trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch)
if err != nil {
return nil, err
}
@@ -158,7 +155,7 @@ func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Travers
parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount)
if err != nil {
- a.log.Debug(logs.GetUnableToGetAllPartsECObject, zap.Error(err))
+ a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err))
}
return parts
}
@@ -232,7 +229,7 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -241,15 +238,13 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var object *objectSDK.Object
if a.head {
object, err = a.localStorage.Head(ctx, addr, false)
- if err != nil {
- a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
}
} else {
object, err = a.localStorage.Get(ctx, addr)
- if err != nil {
- a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
}
}
return object
@@ -262,11 +257,11 @@ func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.N
var errECInfo *objectSDK.ECInfoError
_, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true)
if err == nil {
- a.log.Error(logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
+ a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
return nil
}
if !errors.As(err, &errECInfo) {
- a.log.Warn(logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
+ a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
return nil
}
result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks))
@@ -280,7 +275,7 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -289,15 +284,13 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var object *objectSDK.Object
if a.head {
object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false)
- if err != nil {
- a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
}
} else {
object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node)
- if err != nil {
- a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
- return nil
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
}
}
return object
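The hunks above stop logging a warning when a part fetch failed only because the context was canceled, which is routine once enough EC chunks have been collected. A minimal sketch of the pattern:

```go
package sketch

import (
	"context"
	"errors"
	"log"
)

// getPart fetches one EC part and logs failures, except cancellation:
// a canceled context usually means the reconstruction already has
// enough chunks and tore down the remaining requests.
func getPart(ctx context.Context, fetch func(context.Context) ([]byte, error)) []byte {
	part, err := fetch(ctx)
	if err != nil && !errors.Is(err, context.Canceled) {
		log.Printf("unable to get EC part: %v", err)
	}
	return part // nil when the fetch failed
}
```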
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index d22b14192..dfb31133c 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -10,32 +10,25 @@ import (
func (r *request) executeOnContainer(ctx context.Context) {
if r.isLocal() {
- r.log.Debug(logs.GetReturnResultDirectly)
+ r.log.Debug(ctx, logs.GetReturnResultDirectly)
return
}
lookupDepth := r.netmapLookupDepth()
- r.log.Debug(logs.TryingToExecuteInContainer,
+ r.log.Debug(ctx, logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- ok := r.initEpoch()
+ ok := r.initEpoch(ctx)
if !ok {
return
}
- for {
- if r.processCurrentEpoch(ctx) {
- break
- }
-
- // check the maximum depth has been reached
- if lookupDepth == 0 {
- break
- }
+ localStatus := r.status
+ for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 {
lookupDepth--
// go to the previous epoch
@@ -43,12 +36,12 @@ func (r *request) executeOnContainer(ctx context.Context) {
}
}
-func (r *request) processCurrentEpoch(ctx context.Context) bool {
- r.log.Debug(logs.ProcessEpoch,
+func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
+ r.log.Debug(ctx, logs.ProcessEpoch,
zap.Uint64("number", r.curProcEpoch),
)
- traverser, ok := r.generateTraverser(r.address())
+ traverser, ok := r.generateTraverser(ctx, r.address())
if !ok {
return true
}
@@ -56,12 +49,16 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- r.status = statusUndefined
+ if localStatus == statusEC { // possible only for raw == true and local == false
+ r.status = statusEC
+ } else {
+ r.status = statusUndefined
+ }
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- r.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
+ r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
return false
}
@@ -69,7 +66,7 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool {
for i := range addrs {
select {
case <-ctx.Done():
- r.log.Debug(logs.InterruptPlacementIterationByContext,
+ r.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
zap.Error(ctx.Err()),
)
@@ -85,7 +82,7 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool {
client.NodeInfoFromNetmapElement(&info, addrs[i])
if r.processNode(ctx, info) {
- r.log.Debug(logs.GetCompletingTheOperation)
+ r.log.Debug(ctx, logs.GetCompletingTheOperation)
return true
}
}
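The `localStatus` parameter threaded into `processCurrentEpoch` above preserves an already-observed EC status across the per-epoch reset, which the comment notes is possible only for raw, non-local requests. A tiny sketch of the reset rule:

```go
package sketch

const (
	statusUndefined = iota
	statusEC
)

// resetStatus mirrors the per-epoch reset: a previously observed EC
// status survives, so chunk info collected earlier is not discarded
// between placement iterations.
func resetStatus(localStatus int) int {
	if localStatus == statusEC {
		return statusEC
	}
	return statusUndefined
}
```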
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 03b7f8bf2..3a50308c2 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -87,51 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
exec.execute(ctx)
- return exec.statusError.err
+ return exec.err
}
-func (exec *request) execute(ctx context.Context) {
- exec.log.Debug(logs.ServingRequest)
+func (r *request) execute(ctx context.Context) {
+ r.log.Debug(ctx, logs.ServingRequest)
// perform local operation
- exec.executeLocal(ctx)
+ r.executeLocal(ctx)
- exec.analyzeStatus(ctx, true)
+ r.analyzeStatus(ctx, true)
}
-func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
+func (r *request) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
- switch exec.status {
+ switch r.status {
case statusOK:
- exec.log.Debug(logs.OperationFinishedSuccessfully)
+ r.log.Debug(ctx, logs.OperationFinishedSuccessfully)
case statusINHUMED:
- exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
+ r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
- exec.log.Debug(logs.GetRequestedObjectIsVirtual)
- exec.assemble(ctx)
+ r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual)
+ r.assemble(ctx)
case statusOutOfRange:
- exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
+ r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds)
case statusEC:
- exec.log.Debug(logs.GetRequestedObjectIsEC)
- if exec.isRaw() && execCnr {
- exec.executeOnContainer(ctx)
- exec.analyzeStatus(ctx, false)
+ r.log.Debug(ctx, logs.GetRequestedObjectIsEC)
+ if r.isRaw() && execCnr {
+ r.executeOnContainer(ctx)
+ r.analyzeStatus(ctx, false)
}
- exec.assembleEC(ctx)
+ r.assembleEC(ctx)
default:
- exec.log.Debug(logs.OperationFinishedWithError,
- zap.Error(exec.err),
+ r.log.Debug(ctx, logs.OperationFinishedWithError,
+ zap.Error(r.err),
)
var errAccessDenied *apistatus.ObjectAccessDenied
- if execCnr && errors.As(exec.err, &errAccessDenied) {
+ if execCnr && errors.As(r.err, &errAccessDenied) {
// Local get can't return access denied error, so this error was returned by
// write to the output stream. So there is no need to try to find object on other nodes.
return
}
if execCnr {
- exec.executeOnContainer(ctx)
- exec.analyzeStatus(ctx, false)
+ r.executeOnContainer(ctx)
+ r.analyzeStatus(ctx, false)
}
}
}
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 6827018dc..3efc72065 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -63,7 +63,7 @@ type testClient struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch() (uint64, error) {
+func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
return uint64(e), nil
}
@@ -79,7 +79,7 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
+func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
opts := make([]placement.Option, 0, 4)
opts = append(opts,
placement.ForContainer(g.c),
@@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e ui
opts = append(opts, placement.ForObject(*obj))
}
- t, err := placement.NewTraverser(opts...)
+ t, err := placement.NewTraverser(context.Background(), opts...)
return t, &containerCore.Container{
Value: g.c,
}, err
}
-func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
index a6882d4a8..83ef54744 100644
--- a/pkg/services/object/get/getrangeec_test.go
+++ b/pkg/services/object/get/getrangeec_test.go
@@ -6,12 +6,12 @@ import (
"fmt"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -28,14 +28,14 @@ type containerStorage struct {
cnt *container.Container
}
-func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) {
+func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) {
coreCnt := coreContainer.Container{
Value: *cs.cnt,
}
return &coreCnt, nil
}
-func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) {
+func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
return nil, nil
}
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index 1cd5e549c..cfabb082f 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -31,7 +31,7 @@ func (r *request) executeLocal(ctx context.Context) {
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetLocalGetFailed, zap.Error(err))
+ r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 163767c43..78ca5b5e3 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -18,9 +18,9 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
defer span.End()
- r.log.Debug(logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
+ r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
- rs, ok := r.getRemoteStorage(info)
+ rs, ok := r.getRemoteStorage(ctx, info)
if !ok {
return true
}
@@ -35,13 +35,13 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
switch {
default:
- r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err))
+ r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err))
if r.status != statusEC {
// for raw requests, continue to collect other parts
r.status = statusUndefined
if errors.As(err, &errAccessDenied) {
r.err = err
- } else {
+ } else if r.err == nil || !errors.As(r.err, &errAccessDenied) {
r.err = new(apistatus.ObjectNotFound)
}
}
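The extra `else if` branch above keeps an `ObjectAccessDenied` returned by an earlier node from being masked by the generic `ObjectNotFound` fallback of a later one. A sketch of the precedence rule, with `errors.Is` on sentinel errors standing in for the `errors.As` checks on SDK status types:

```go
package sketch

import "errors"

var (
	errAccessDenied = errors.New("access denied")
	errNotFound     = errors.New("object not found")
)

// recordErr mirrors the patched processNode logic: a denial always
// replaces the stored error, and the not-found fallback applies only
// when no denial has been recorded yet.
func recordErr(stored, incoming error) error {
	if errors.Is(incoming, errAccessDenied) {
		return incoming
	}
	if stored == nil || !errors.Is(stored, errAccessDenied) {
		return errNotFound
	}
	return stored
}
```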
diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go
index 0df67dec9..2c64244cf 100644
--- a/pkg/services/object/get/remote_getter.go
+++ b/pkg/services/object/get/remote_getter.go
@@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob
if err != nil {
return nil, err
}
- epoch, err := g.es.Epoch()
+ epoch, err := g.es.Epoch(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index 1a7a43a35..268080486 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -47,14 +47,14 @@ func (r *request) setLogger(l *logger.Logger) {
req = "GET_RANGE"
}
- r.log = &logger.Logger{Logger: l.With(
+ r.log = l.With(
zap.String("request", req),
zap.Stringer("address", r.address()),
zap.Bool("raw", r.isRaw()),
zap.Bool("local", r.isLocal()),
zap.Bool("with session", r.prm.common.SessionToken() != nil),
zap.Bool("with bearer", r.prm.common.BearerToken() != nil),
- )}
+ )
}
func (r *request) isLocal() bool {
@@ -116,20 +116,20 @@ func (r *request) netmapLookupDepth() uint64 {
return r.prm.common.NetmapLookupDepth()
}
-func (r *request) initEpoch() bool {
+func (r *request) initEpoch(ctx context.Context) bool {
r.curProcEpoch = r.netmapEpoch()
if r.curProcEpoch > 0 {
return true
}
- e, err := r.epochSource.Epoch()
+ e, err := r.epochSource.Epoch(ctx)
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+ r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
return false
case err == nil:
@@ -138,17 +138,17 @@ func (r *request) initEpoch() bool {
}
}
-func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) {
+func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) {
obj := addr.Object()
- t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch)
+ t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch)
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+ r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
return nil, false
case err == nil:
@@ -156,13 +156,13 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo
}
}
-func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) {
+func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) {
rs, err := r.remoteStorageConstructor.Get(info)
if err != nil {
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient)
+ r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient)
return nil, false
}
@@ -185,7 +185,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool {
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err))
+ r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
@@ -206,7 +206,7 @@ func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object)
r.status = statusUndefined
r.err = err
- r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err))
+ r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index 3413abeb7..a103f5a7f 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -34,7 +34,7 @@ func New(
result := &Service{
keyStore: ks,
epochSource: es,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
localStorage: &engineLocalStorage{
engine: e,
},
@@ -53,6 +53,6 @@ func New(
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(s *Service) {
- s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))}
+ s.log = l
}
}
diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go
index 9669afdba..664366d1b 100644
--- a/pkg/services/object/get/types.go
+++ b/pkg/services/object/get/types.go
@@ -20,11 +20,11 @@ import (
)
type epochSource interface {
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
}
type traverserGenerator interface {
- GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
type keyStorage interface {
diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go
index 213455e10..aaa09b891 100644
--- a/pkg/services/object/get/v2/errors.go
+++ b/pkg/services/object/get/v2/errors.go
@@ -4,8 +4,8 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ refs "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
)
var (
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index 18194c740..60fcd7fbf 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -7,16 +7,16 @@ import (
"io"
"sync"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
index 10ecfc4a3..a44616fc9 100644
--- a/pkg/services/object/get/v2/get_range_forwarder.go
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -7,15 +7,15 @@ import (
"io"
"sync"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go
index e97b60f66..308ccd512 100644
--- a/pkg/services/object/get/v2/get_range_hash.go
+++ b/pkg/services/object/get/v2/get_range_hash.go
@@ -5,15 +5,15 @@ import (
"encoding/hex"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -22,7 +22,7 @@ import (
// GetRangeHash calls internal service and returns v2 response.
func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- forward, err := s.needToForwardGetRangeHashRequest(req)
+ forward, err := s.needToForwardGetRangeHashRequest(ctx, req)
if err != nil {
return nil, err
}
@@ -48,7 +48,7 @@ type getRangeForwardParams struct {
address oid.Address
}
-func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
+func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
if req.GetMetaHeader().GetTTL() <= 1 {
return getRangeForwardParams{}, nil
}
@@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq
}
result.address = addr
- cont, err := s.contSource.Get(addr.Container())
+ cont, err := s.contSource.Get(ctx, addr.Container())
if err != nil {
return result, fmt.Errorf("(%T) could not get container: %w", s, err)
}
- epoch, err := s.netmapSource.Epoch()
+ epoch, err := s.netmapSource.Epoch(ctx)
if err != nil {
return result, fmt.Errorf("(%T) could not get epoch: %w", s, err)
}
- nm, err := s.netmapSource.GetNetMapByEpoch(epoch)
+ nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return result, fmt.Errorf("(%T) could not get netmap: %w", s, err)
}
@@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq
builder := placement.NewNetworkMapBuilder(nm)
objectID := addr.Object()
- nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy())
+ nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy())
if err != nil {
return result, fmt.Errorf("(%T) could not build object placement: %w", s, err)
}
@@ -125,14 +125,14 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
var addrGr network.AddressGroup
if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil {
- s.log.Warn(logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
var extAddr network.AddressGroup
if len(node.ExternalAddresses()) > 0 {
if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil {
- s.log.Warn(logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
}
@@ -150,12 +150,12 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
if firstErr == nil {
firstErr = err
}
- s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromNode,
+ s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode,
zap.String("node_public_key", hex.EncodeToString(node.PublicKey())),
zap.Stringer("address", params.address),
zap.Error(err))
}
- s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
+ s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
if firstErr != nil {
return nil, firstErr
}
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
index 5e16008b8..56056398d 100644
--- a/pkg/services/object/get/v2/head_forwarder.go
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -5,15 +5,15 @@ import (
"crypto/ecdsa"
"sync"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index edd19b441..0ec8912fd 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -12,6 +11,7 @@ import (
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
@@ -60,7 +60,7 @@ func NewService(svc *getsvc.Service,
netmapSource: netmapSource,
announcedKeys: announcedKeys,
contSource: contSource,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
}
for i := range opts {
@@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get V2 service"))}
+ c.log = l
}
}
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
index ce9a5c767..0d73bcd4d 100644
--- a/pkg/services/object/get/v2/streamer.go
+++ b/pkg/services/object/get/v2/streamer.go
@@ -3,8 +3,8 @@ package getsvc
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
p.SetHeader(objV2.GetHeader())
p.SetSignature(objV2.GetSignature())
- return s.GetObjectStream.Send(newResponse(p))
+ return s.Send(newResponse(p))
}
func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
p := new(objectV2.GetObjectPartChunk)
p.SetChunk(chunk)
- return s.GetObjectStream.Send(newResponse(p))
+ return s.Send(newResponse(p))
}
func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
}
func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
- return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
+ return s.Send(newRangeResponse(chunk))
}
func newRangeResponse(p []byte) *objectV2.GetRangeResponse {
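Dropping the embedded-field selector works because of Go's method promotion: streamObjectWriter embeds GetObjectStream, so s.Send and s.GetObjectStream.Send resolve to the same method as long as the wrapper declares no Send of its own. A self-contained sketch of the rule:

    package example

    type Sender interface{ Send(msg string) error }

    // wrapper embeds Sender, so Send is promoted onto wrapper itself.
    type wrapper struct{ Sender }

    func (w wrapper) WriteChunk(chunk string) error {
    	// Identical to w.Sender.Send(chunk); the explicit selector is
    	// only required if wrapper declared a competing Send method.
    	return w.Send(chunk)
    }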
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index 852c2aec3..e699a3779 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -3,19 +3,20 @@ package getsvc
import (
"context"
"crypto/sha256"
+ "errors"
"hash"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/status"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -182,9 +183,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
default:
return nil, errUnknownChechsumType(t)
case refs.SHA256:
- p.SetHashGenerator(func() hash.Hash {
- return sha256.New()
- })
+ p.SetHashGenerator(sha256.New)
case refs.TillichZemor:
p.SetHashGenerator(func() hash.Hash {
return tz.New()
@@ -360,19 +359,20 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien
info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
var err error
-
- defer func() {
- stop = err == nil
-
- if stop || firstErr == nil {
- firstErr = err
- }
-
- // would be nice to log otherwise
- }()
-
res, err = f(ctx, addr, c, key)
+ // Non-status logic errors that can be returned
+ // from the SDK client should not be considered
+ // connection errors.
+ var siErr *objectSDK.SplitInfoError
+ var eiErr *objectSDK.ECInfoError
+
+ stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
+
+ if stop || firstErr == nil {
+ firstErr = err
+ }
+
return
})
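The rewritten forwarder no longer treats every non-nil error as a reason to try the node's next address. objectSDK.SplitInfoError and objectSDK.ECInfoError are protocol-level answers (the object is split or erasure-coded, and the error itself carries the information needed to continue), so iteration stops for them just as it does on success. A sketch of the classification, with stand-in error types:

    package example

    import "errors"

    type SplitInfoError struct{}

    func (*SplitInfoError) Error() string { return "split info" }

    type ECInfoError struct{}

    func (*ECInfoError) Error() string { return "ec info" }

    // stopIteration mirrors the new condition: nil means success, and the
    // two info errors already carry a usable answer, so contacting the
    // node's remaining addresses would be pointless.
    func stopIteration(err error) bool {
    	var siErr *SplitInfoError
    	var eiErr *ECInfoError
    	return err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
    }

Note that firstErr is still assigned on the stop path, so a split/EC info error reaches the caller instead of being masked by a later connection failure.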
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index 2c405070d..3e8832640 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -7,9 +7,11 @@ import (
"errors"
"fmt"
"io"
+ "strconv"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -31,6 +33,8 @@ type commonPrm struct {
local bool
xHeaders []string
+
+ netmapEpoch uint64
}
// SetClient sets base client for FrostFS API communication.
@@ -73,6 +77,14 @@ func (x *commonPrm) SetXHeaders(hs []string) {
x.xHeaders = hs
}
+func (x *commonPrm) calculateXHeaders() []string {
+ hs := x.xHeaders
+ if x.netmapEpoch != 0 {
+ hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
+ }
+ return hs
+}
+
type readPrmCommon struct {
commonPrm
}
@@ -80,8 +92,8 @@ type readPrmCommon struct {
// SetNetmapEpoch sets the epoch number to be used to locate the object.
//
// By default, the current epoch on the server is used.
-func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
- // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465
+func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) {
+ x.netmapEpoch = epoch
}
// GetObjectPrm groups parameters of GetObject operation.
@@ -139,7 +151,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.xHeaders
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Key = prm.key
@@ -233,7 +245,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error)
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
- prm.ClientParams.XHeaders = prm.xHeaders
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
if err == nil {
@@ -326,7 +338,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.xHeaders
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Length = prm.ln
@@ -390,7 +402,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
defer span.End()
prmCli := client.PrmObjectPutInit{
- XHeaders: prm.xHeaders,
+ XHeaders: prm.calculateXHeaders(),
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -437,7 +449,7 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro
}
prmCli := client.PrmObjectPutSingle{
- XHeaders: prm.xHeaders,
+ XHeaders: prm.calculateXHeaders(),
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -496,7 +508,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
prm.cliPrm.Local = prm.local
prm.cliPrm.Session = prm.tokenSession
prm.cliPrm.BearerToken = prm.tokenBearer
- prm.cliPrm.XHeaders = prm.xHeaders
+ prm.cliPrm.XHeaders = prm.calculateXHeaders()
prm.cliPrm.Key = prm.key
rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm)
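calculateXHeaders closes the old FIXME in SetNetmapEpoch: rather than dropping the requested epoch, every request path now appends it to the user-supplied X-headers as a key/value pair. A sketch of the resulting slice; the header key constant below is an assumed value shown only for illustration, the real one comes from the SDK's session package:

    package example

    import "strconv"

    // xHeaderNetmapEpoch stands in for sessionAPI.XHeaderNetmapEpoch
    // (assumed value, used only to make the output concrete).
    const xHeaderNetmapEpoch = "__SYSTEM__NETMAP_EPOCH"

    func calculateXHeaders(user []string, epoch uint64) []string {
    	hs := user
    	if epoch != 0 {
    		// X-headers travel as a flat list: key, value, key, value...
    		hs = append(hs, xHeaderNetmapEpoch, strconv.FormatUint(epoch, 10))
    	}
    	return hs
    }

    // calculateXHeaders([]string{"trace", "abc"}, 13) ->
    // ["trace", "abc", "__SYSTEM__NETMAP_EPOCH", "13"]

Epoch zero is treated as "unset", matching the server-side default of resolving objects against the current epoch.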
diff --git a/pkg/services/object/internal/key.go b/pkg/services/object/internal/key.go
index eba716976..1e0a7ef90 100644
--- a/pkg/services/object/internal/key.go
+++ b/pkg/services/object/internal/key.go
@@ -3,8 +3,8 @@ package internal
import (
"bytes"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
// VerifyResponseKeyV2 checks if response is signed with expected key. Returns client.ErrWrongPublicKey if not.
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 61aed5003..6a6ee0f0f 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -4,8 +4,9 @@ import (
"context"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -34,7 +35,7 @@ type (
}
MetricRegister interface {
- AddRequestDuration(string, time.Duration, bool)
+ AddRequestDuration(string, time.Duration, bool, string)
AddPayloadSize(string, int)
}
)
@@ -51,7 +52,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
if m.enabled {
t := time.Now()
defer func() {
- m.metrics.AddRequestDuration("Get", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
}()
err = m.next.Get(req, &getStreamMetric{
ServerStream: stream,
@@ -64,11 +65,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
return
}
-func (m MetricCollector) Put() (PutObjectStream, error) {
+func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Put()
+ stream, err := m.next.Put(ctx)
if err != nil {
return nil, err
}
@@ -79,14 +80,14 @@ func (m MetricCollector) Put() (PutObjectStream, error) {
start: t,
}, nil
}
- return m.next.Put()
+ return m.next.Put(ctx)
}
-func (m MetricCollector) Patch() (PatchObjectStream, error) {
+func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Patch()
+ stream, err := m.next.Patch(ctx)
if err != nil {
return nil, err
}
@@ -97,7 +98,7 @@ func (m MetricCollector) Patch() (PatchObjectStream, error) {
start: t,
}, nil
}
- return m.next.Patch()
+ return m.next.Patch(ctx)
}
func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
@@ -106,7 +107,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl
res, err := m.next.PutSingle(ctx, request)
- m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
if err == nil {
m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload()))
}
@@ -122,7 +123,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest)
res, err := m.next.Head(ctx, request)
- m.metrics.AddRequestDuration("Head", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -135,7 +136,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream)
err := m.next.Search(req, stream)
- m.metrics.AddRequestDuration("Search", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
return err
}
@@ -148,7 +149,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque
res, err := m.next.Delete(ctx, request)
- m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
return m.next.Delete(ctx, request)
@@ -160,7 +161,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR
err := m.next.GetRange(req, stream)
- m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
return err
}
@@ -173,7 +174,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa
res, err := m.next.GetRangeHash(ctx, request)
- m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil)
+ m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -209,7 +210,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error
func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil)
+ s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -223,7 +224,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e
func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil)
+ s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
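Every AddRequestDuration call now carries a fourth argument: the IO tag that qos.IOTagFromContext extracts from the unary context or the stream's context. This lets a MetricRegister implementation split request-duration metrics by traffic class rather than only by method and status. A toy register satisfying the widened interface, as a sketch:

    package example

    import (
    	"strconv"
    	"sync"
    	"time"
    )

    // register is a minimal MetricRegister: durations are keyed by
    // method, success flag, and the IO tag forwarded by MetricCollector.
    type register struct {
    	mu  sync.Mutex
    	dur map[[3]string]time.Duration
    }

    func newRegister() *register {
    	return &register{dur: make(map[[3]string]time.Duration)}
    }

    func (r *register) AddRequestDuration(method string, d time.Duration, ok bool, ioTag string) {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	r.dur[[3]string{method, strconv.FormatBool(ok), ioTag}] += d
    }

    func (r *register) AddPayloadSize(method string, size int) {}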
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index f1082dfff..5d298bfed 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -26,9 +26,9 @@ func NewService(cfg *objectwriter.Config,
}
}
-// Put calls internal service and returns v2 object streamer.
+// Patch calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
- nodeKey, err := s.Config.KeyStorage.GetKey(nil)
+ nodeKey, err := s.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 85c28cda0..ff13b1d3e 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -7,13 +7,13 @@ import (
"fmt"
"io"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
@@ -57,12 +57,31 @@ func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
return hs
}
+func isLinkObject(hdr *objectV2.HeaderWithSignature) bool {
+ split := hdr.GetHeader().GetSplit()
+ return len(split.GetChildren()) > 0 && split.GetParent() != nil
+}
+
+func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool {
+ return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil
+}
+
func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
hdrWithSig, addr, err := s.readHeader(ctx, req)
if err != nil {
return err
}
+ if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular {
+ return errors.New("non-regular object can't be patched")
+ }
+ if isLinkObject(hdrWithSig) {
+ return errors.New("linking object can't be patched")
+ }
+ if isComplexObjectPart(hdrWithSig) {
+ return errors.New("complex object parts can't be patched")
+ }
+
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return err
@@ -93,11 +112,10 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- target, err := target.New(&objectwriter.Params{
- Config: s.Config,
- Common: commonPrm,
- Header: objectSDK.NewFromV2(oV2),
- SignRequestPrivateKey: s.localNodeKey,
+ target, err := target.New(ctx, objectwriter.Params{
+ Config: s.Config,
+ Common: commonPrm,
+ Header: objectSDK.NewFromV2(oV2),
})
if err != nil {
return fmt.Errorf("target creation: %w", err)
@@ -177,7 +195,12 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
patch.FromV2(req.GetBody())
if !s.nonFirstSend {
- err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes)
+ err := s.patcher.ApplyHeaderPatch(ctx,
+ patcher.ApplyHeaderPatchPrm{
+ NewSplitHeader: patch.NewSplitHeader,
+ NewAttributes: patch.NewAttributes,
+ ReplaceAttributes: patch.ReplaceAttributes,
+ })
if err != nil {
return fmt.Errorf("patch attributes: %w", err)
}
@@ -196,6 +219,9 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
}
func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ if s.patcher == nil {
+ return nil, errors.New("uninitialized patch streamer")
+ }
patcherResp, err := s.patcher.Close(ctx)
if err != nil {
return nil, err
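init now refuses to patch headers that cannot be modified safely: non-regular objects (tombstones, locks and the like), link objects, and parts of split or erasure-coded objects, where patching a single part would desynchronize it from the larger object. A condensed sketch of the same decision table over a stripped-down header:

    package example

    import "errors"

    // hdr is a stand-in for objectV2.HeaderWithSignature.
    type hdr struct {
    	regular  bool // object type is REGULAR
    	split    bool // split header present
    	ec       bool // erasure-coding header present
    	children int  // split children count
    	parent   bool // split parent reference present
    }

    func checkPatchable(h hdr) error {
    	switch {
    	case !h.regular:
    		return errors.New("non-regular object can't be patched")
    	case h.split && h.children > 0 && h.parent: // link object
    		return errors.New("linking object can't be patched")
    	case h.ec || h.split: // part of a complex object
    		return errors.New("complex object parts can't be patched")
    	}
    	return nil
    }

The order matters: a link object also carries a split header, so the more specific check runs first to produce the more precise error.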
diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go
index 4f3c3ef17..b9416789c 100644
--- a/pkg/services/object/patch/util.go
+++ b/pkg/services/object/patch/util.go
@@ -6,8 +6,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go
index 0c8f12b45..52a7c102c 100644
--- a/pkg/services/object/put/prm.go
+++ b/pkg/services/object/put/prm.go
@@ -2,7 +2,6 @@ package putsvc
import (
"context"
- "crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -21,8 +20,6 @@ type PutInitPrm struct {
traverseOpts []placement.Option
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
-
- privateKey *ecdsa.PrivateKey
}
type PutChunkPrm struct {
@@ -68,11 +65,3 @@ func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm {
return p
}
-
-func (p *PutInitPrm) WithPrivateKey(v *ecdsa.PrivateKey) *PutInitPrm {
- if p != nil {
- p.privateKey = v
- }
-
- return p
-}
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index 8cf4f0d62..7aeb5857d 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -27,9 +26,7 @@ func NewService(ks *objutil.KeyStorage,
opts ...objectwriter.Option,
) *Service {
c := &objectwriter.Config{
- RemotePool: util.NewPseudoWorkerPool(),
- LocalPool: util.NewPseudoWorkerPool(),
- Logger: &logger.Logger{Logger: zap.L()},
+ Logger: logger.NewLoggerWrapper(zap.L()),
KeyStorage: ks,
ClientConstructor: cc,
MaxSizeSrc: ms,
@@ -59,8 +56,8 @@ func NewService(ks *objutil.KeyStorage,
}
}
-func (p *Service) Put() (*Streamer, error) {
+func (s *Service) Put() (*Streamer, error) {
return &Streamer{
- Config: p.Config,
+ Config: s.Config,
}, nil
}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 9b4163268..90f473254 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -9,11 +9,6 @@ import (
"hash"
"sync"
- objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
@@ -26,9 +21,14 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
@@ -86,7 +86,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest
}
func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.validarePutSingleSize(obj); err != nil {
+ if err := s.validarePutSingleSize(ctx, obj); err != nil {
return object.ContentMeta{}, err
}
@@ -97,12 +97,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object)
return s.validatePutSingleObject(ctx, obj)
}
-func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error {
+func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error {
if uint64(len(obj.Payload())) != obj.PayloadSize() {
return target.ErrWrongPayloadSize
}
- maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize()
+ maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
if obj.PayloadSize() > maxAllowedSize {
return target.ErrExceedingMaxSize
}
@@ -153,7 +153,7 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob
func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
localOnly := req.GetMetaHeader().GetTTL() <= 1
- placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly)
+ placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly)
if err != nil {
return err
}
@@ -166,18 +166,18 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
}
func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- iter := s.Config.NewNodeIterator(placement.placementOptions)
+ iter := s.NewNodeIterator(placement.placementOptions)
iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.Config.KeyStorage,
+ keyStorage: s.KeyStorage,
signer: &sync.Once{},
}
return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
- return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
+ return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container)
})
}
@@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
if err != nil {
return err
}
- key, err := s.Config.KeyStorage.GetKey(nil)
+ key, err := s.KeyStorage.GetKey(nil)
if err != nil {
return err
}
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.Config.KeyStorage,
+ keyStorage: s.KeyStorage,
signer: &sync.Once{},
}
@@ -218,14 +218,14 @@ type putSinglePlacement struct {
resetSuccessAfterOnBroadcast bool
}
-func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
+func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
var result putSinglePlacement
cnrID, ok := obj.ContainerID()
if !ok {
return result, errors.New("missing container ID")
}
- cnrInfo, err := s.Config.ContainerSource.Get(cnrID)
+ cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
if err != nil {
return result, fmt.Errorf("could not get container by ID: %w", err)
}
@@ -249,31 +249,31 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
}
result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
- latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource)
+ latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
if err != nil {
return result, fmt.Errorf("could not get latest network map: %w", err)
}
builder := placement.NewNetworkMapBuilder(latestNetmap)
if localOnly {
result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
- builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
+ builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
}
result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
return result, nil
}
func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
- signer *putSingleRequestSigner, meta object.ContentMeta,
+ signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container,
) error {
if nodeDesc.Local {
- return s.saveLocal(ctx, obj, meta)
+ return s.saveLocal(ctx, obj, meta, container)
}
var info client.NodeInfo
client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
- c, err := s.Config.ClientConstructor.Get(info)
+ c, err := s.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
@@ -281,9 +281,10 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
return s.redirectPutSingleRequest(ctx, signer, obj, info, c)
}
-func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error {
+func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
localTarget := &objectwriter.LocalTarget{
- Storage: s.Config.LocalStore,
+ Storage: s.LocalStore,
+ Container: container,
}
return localTarget.WriteObject(ctx, obj, meta)
}
@@ -316,12 +317,11 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
if err != nil {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
- s.Config.Logger.Warn(logs.PutSingleRedirectFailure,
+ s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
zap.Error(err),
zap.Stringer("address", addr),
zap.Stringer("object_id", objID),
zap.Stringer("container_id", cnrID),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
@@ -350,8 +350,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
err = signature.VerifyServiceMessage(resp)
if err != nil {
err = fmt.Errorf("response verification failed: %w", err)
+ return
}
+ st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus())
+ err = apistatus.ErrFromStatus(st)
+
return
})
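Two fixes land in the redirect closure: a signature-verification failure now returns immediately instead of being silently overwritten, and the response's status header is decoded into a Go error, so a remote PutSingle that reports a failure status no longer looks like success. A sketch of the status-to-error step with a stand-in status type (apistatus.FromStatusV2 and apistatus.ErrFromStatus are the real helpers used above):

    package example

    import "fmt"

    // statusV2 stands in for the wire-level status message.
    type statusV2 struct {
    	code    uint32
    	message string
    }

    // errFromStatus mirrors the apistatus behavior: an OK status decodes
    // to nil, anything else becomes an error the caller can inspect.
    func errFromStatus(st statusV2) error {
    	if st.code == 0 { // OK
    		return nil
    	}
    	return fmt.Errorf("status %d: %s", st.code, st.message)
    }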
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index d08e7fafa..19768b7fa 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -26,7 +26,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
}
// initialize destination target
- prmTarget := &objectwriter.Params{
+ prmTarget := objectwriter.Params{
Config: p.Config,
Common: prm.common,
Header: prm.hdr,
@@ -36,7 +36,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
}
var err error
- p.target, err = target.New(prmTarget)
+ p.target, err = target.New(ctx, prmTarget)
if err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go
index db902ae59..78d4c711d 100644
--- a/pkg/services/object/put/v2/service.go
+++ b/pkg/services/object/put/v2/service.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
- objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Put operation of Object service v2.
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 5bf15b4cd..f0c648187 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -4,11 +4,6 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
@@ -17,6 +12,11 @@ import (
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -56,10 +56,10 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxSizeSrc.MaxObjectSize()
+ maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx)
s.sizes = &sizes{
- payloadSz: uint64(v.GetHeader().GetPayloadLength()),
+ payloadSz: v.GetHeader().GetPayloadLength(),
}
// check payload size limit overflow
diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go
index a157a9542..5ec9ebe10 100644
--- a/pkg/services/object/put/v2/util.go
+++ b/pkg/services/object/put/v2/util.go
@@ -1,10 +1,10 @@
package putsvc
import (
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go
new file mode 100644
index 000000000..01eb1ea8d
--- /dev/null
+++ b/pkg/services/object/qos.go
@@ -0,0 +1,145 @@
+package object
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+)
+
+var _ ServiceServer = (*qosObjectService)(nil)
+
+type AdjustIOTag interface {
+ AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
+type qosObjectService struct {
+ next ServiceServer
+ adj AdjustIOTag
+}
+
+func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer {
+ return &qosObjectService{
+ next: next,
+ adj: adjIOTag,
+ }
+}
+
+func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Delete(ctx, req)
+}
+
+func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Get(req, &qosReadStream[*object.GetResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.GetRangeHash(ctx, req)
+}
+
+func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Head(ctx, req)
+}
+
+func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ s, err := q.next.Patch(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{
+ s: s,
+ adj: q.adj,
+ }, nil
+}
+
+func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) {
+ s, err := q.next.Put(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &qosWriteStream[*object.PutRequest, *object.PutResponse]{
+ s: s,
+ adj: q.adj,
+ }, nil
+}
+
+func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.PutSingle(ctx, req)
+}
+
+func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+type qosSend[T any] interface {
+ Send(T) error
+}
+
+type qosReadStream[T any] struct {
+ sender qosSend[T]
+ ctxF func() context.Context
+}
+
+func (g *qosReadStream[T]) Context() context.Context {
+ return g.ctxF()
+}
+
+func (g *qosReadStream[T]) Send(resp T) error {
+ return g.sender.Send(resp)
+}
+
+type qosVerificationHeader interface {
+ GetVerificationHeader() *session.RequestVerificationHeader
+}
+
+type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
+ Send(context.Context, TReq) error
+ CloseAndRecv(context.Context) (TResp, error)
+}
+
+type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
+ s qosSendRecv[TReq, TResp]
+ adj AdjustIOTag
+
+ ioTag string
+ ioTagDefined bool
+}
+
+func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
+ if q.ioTagDefined {
+ ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+ }
+ return q.s.CloseAndRecv(ctx)
+}
+
+func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
+ if !q.ioTagDefined {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
+ }
+ assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
+ ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+ return q.s.Send(ctx, req)
+}
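The new QoS layer is plain middleware: unary handlers adjust the incoming IO tag once and pass the derived context on, read streams pin the adjusted context behind ctxF, and write streams capture the tag on the first Send and re-apply it to every later Send and to CloseAndRecv, so a multi-chunk Put is tagged consistently. A toy AdjustIOTag implementation, as a sketch of the contract (a real adjuster is expected to classify by the verified request key):

    package example

    import "context"

    type AdjustIOTag interface {
    	AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
    }

    type tagKey struct{}

    // staticAdjuster tags every request identically, ignoring the key.
    // A production adjuster would map signer identity to a tag class.
    type staticAdjuster struct{ tag string }

    func (a staticAdjuster) AdjustIncomingTag(ctx context.Context, _ []byte) context.Context {
    	return context.WithValue(ctx, tagKey{}, a.tag)
    }

Caching ioTag in qosWriteStream also means the adjuster runs once per stream rather than once per chunk.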
diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go
index 18b6107cf..bc6ffd160 100644
--- a/pkg/services/object/remote_reader.go
+++ b/pkg/services/object/remote_reader.go
@@ -2,7 +2,6 @@ package object
import (
"context"
- "errors"
"fmt"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -35,8 +34,6 @@ type RemoteRequestPrm struct {
const remoteOpTTL = 1
-var ErrNotFound = errors.New("object header not found")
-
// NewRemoteReader creates, initializes and returns new RemoteHeader instance.
func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader {
return &RemoteReader{
diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go
deleted file mode 100644
index 95d4c9d93..000000000
--- a/pkg/services/object/request_context.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package object
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-type RequestContextKeyT struct{}
-
-var RequestContextKey = RequestContextKeyT{}
-
-// RequestContext is a context passed between middleware handlers.
-type RequestContext struct {
- Namespace string
-
- SenderKey []byte
-
- ContainerOwner user.ID
-
- Role acl.Role
-
- SoftAPECheck bool
-
- BearerToken *bearer.Token
-}
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index d7ba9f843..80c971e8f 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -4,8 +4,8 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type ResponseService struct {
@@ -80,8 +80,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo
return r, nil
}
-func (s *ResponseService) Put() (PutObjectStream, error) {
- stream, err := s.svc.Put()
+func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
+ stream, err := s.svc.Put(ctx)
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -109,8 +109,8 @@ func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchR
return r, nil
}
-func (s *ResponseService) Patch() (PatchObjectStream, error) {
- stream, err := s.svc.Patch()
+func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ stream, err := s.svc.Patch(ctx)
if err != nil {
return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
}
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index d70574156..60d469b11 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -8,18 +8,19 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
)
func (exec *execCtx) executeOnContainer(ctx context.Context) error {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug(logs.TryingToExecuteInContainer,
+ exec.log.Debug(ctx, logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- if err := exec.initEpoch(); err != nil {
+ if err := exec.initEpoch(ctx); err != nil {
return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
}
@@ -43,11 +44,11 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
- exec.log.Debug(logs.ProcessEpoch,
+ exec.log.Debug(ctx, logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
- traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch)
+ traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch)
if err != nil {
return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
}
@@ -58,7 +59,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
+ exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
break
}
@@ -71,8 +72,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug(logs.InterruptPlacementIterationByContext,
- zap.String("error", ctx.Err().Error()))
+ exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
+ zap.Error(ctx.Err()))
return
default:
}
@@ -81,18 +82,18 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
- exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err))
return
}
ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
- exec.log.Debug(logs.SearchRemoteOperationFailed,
- zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
+ zap.Error(err))
return
}
@@ -101,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
err = exec.writeIDList(ids)
mtx.Unlock()
if err != nil {
- exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err))
return
}
}(i)
@@ -112,3 +113,12 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
return nil
}
+
+func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) {
+ cnrID := exec.containerID()
+ cnr, err := exec.svc.containerSource.Get(ctx, cnrID)
+ if err != nil {
+ return containerSDK.Container{}, err
+ }
+ return cnr.Value, nil
+}
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index 4a2c04ecd..ced51ecce 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -1,6 +1,8 @@
package searchsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -19,13 +21,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = &logger.Logger{Logger: l.With(
+ exec.log = l.With(
zap.String("request", "SEARCH"),
zap.Stringer("container", exec.containerID()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )}
+ )
}
func (exec *execCtx) isLocal() bool {
@@ -48,13 +50,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
return exec.prm.common.NetmapLookupDepth()
}
-func (exec *execCtx) initEpoch() error {
+func (exec *execCtx) initEpoch(ctx context.Context) error {
exec.curProcEpoch = exec.netmapEpoch()
if exec.curProcEpoch > 0 {
return nil
}
- e, err := exec.svc.currentEpochReceiver.Epoch()
+ e, err := exec.svc.currentEpochReceiver.Epoch(ctx)
if err != nil {
return err
}
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index cfaed13b8..ec65ab06a 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -11,7 +11,7 @@ import (
func (exec *execCtx) executeLocal(ctx context.Context) error {
ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
- exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err))
return err
}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 4a5c414d5..76c091f85 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -20,26 +20,26 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(logs.ServingRequest)
+ exec.log.Debug(ctx, logs.ServingRequest)
err := exec.executeLocal(ctx)
- exec.logResult(err)
+ exec.logResult(ctx, err)
if exec.isLocal() {
- exec.log.Debug(logs.SearchReturnResultDirectly)
+ exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
return err
}
err = exec.executeOnContainer(ctx)
- exec.logResult(err)
+ exec.logResult(ctx, err)
return err
}
-func (exec *execCtx) logResult(err error) {
+func (exec *execCtx) logResult(ctx context.Context, err error) {
switch {
default:
- exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
case err == nil:
- exec.log.Debug(logs.OperationFinishedSuccessfully)
+ exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
}
}
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 44abcfe5b..918ad421f 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -6,10 +6,10 @@ import (
"crypto/sha256"
"errors"
"fmt"
+ "slices"
"strconv"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -17,6 +17,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -58,7 +59,7 @@ type simpleIDWriter struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch() (uint64, error) {
+func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
return uint64(e), nil
}
@@ -81,8 +82,8 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
- t, err := placement.NewTraverser(
+func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
+ t, err := placement.NewTraverser(context.Background(),
placement.ForContainer(g.c),
placement.UseBuilder(g.b[epoch]),
placement.WithoutSuccessTracking(),
@@ -90,7 +91,7 @@ func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch ui
return t, &containerCore.Container{Value: g.c}, err
}
-func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -103,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.
return nil, errors.New("vectors for address not found")
}
- res := make([][]netmap.NodeInfo, len(vs))
- copy(res, vs)
+ res := slices.Clone(vs)
return res, nil
}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index cc388c1b2..56fe56468 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -46,14 +46,16 @@ type cfg struct {
}
traverserGenerator interface {
- GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
currentEpochReceiver interface {
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
}
keyStore *util.KeyStorage
+
+ containerSource container.Source
}
// New creates, initializes and returns utility serving
@@ -63,10 +65,11 @@ func New(e *engine.StorageEngine,
tg *util.TraverserGenerator,
ns netmap.Source,
ks *util.KeyStorage,
+ cs container.Source,
opts ...Option,
) *Service {
c := &cfg{
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
clientConstructor: &clientConstructorWrapper{
constructor: cc,
},
@@ -76,6 +79,7 @@ func New(e *engine.StorageEngine,
traverserGenerator: tg,
currentEpochReceiver: ns,
keyStore: ks,
+ containerSource: cs,
}
for i := range opts {
@@ -90,6 +94,6 @@ func New(e *engine.StorageEngine,
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))}
+ c.log = l
}
}
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 67b6c0d01..0be5345b9 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -2,9 +2,11 @@ package searchsvc
import (
"context"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -52,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
}
// exclude processed address
- list = append(list[:i], list[i+1:]...)
+ list = slices.Delete(list, i, i+1)
i--
}
@@ -112,9 +114,13 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
}
func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
+ cnr, err := exec.getContainer(ctx)
+ if err != nil {
+ return nil, err
+ }
var selectPrm engine.SelectPrm
selectPrm.WithFilters(exec.searchFilters())
- selectPrm.WithContainerID(exec.containerID())
+ selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr))
r, err := e.storage.Select(ctx, selectPrm)
if err != nil {
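slices.Delete is the stdlib spelling of the old append trick. Both remove list[i] in place, but the named function states the intent, and from Go 1.22 it also zeroes the vacated tail slot so the backing array does not pin the removed value. A sketch of the filtering loop shape used by uniqueIDWriter:

    package example

    import "slices"

    func filterInPlace(list []int, drop func(int) bool) []int {
    	for i := 0; i < len(list); i++ {
    		if !drop(list[i]) {
    			continue
    		}
    		// Equivalent to list = append(list[:i], list[i+1:]...).
    		list = slices.Delete(list, i, i+1)
    		i-- // stay on the element that slid into position i
    	}
    	return list
    }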
diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go
index 5a2e9b936..7bb6e4d3c 100644
--- a/pkg/services/object/search/v2/request_forwarder.go
+++ b/pkg/services/object/search/v2/request_forwarder.go
@@ -8,14 +8,14 @@ import (
"io"
"sync"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/search/v2/service.go b/pkg/services/object/search/v2/service.go
index 78b72ac79..856cd9f04 100644
--- a/pkg/services/object/search/v2/service.go
+++ b/pkg/services/object/search/v2/service.go
@@ -1,10 +1,10 @@
package searchsvc
import (
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Search operation of Object service v2.
diff --git a/pkg/services/object/search/v2/streamer.go b/pkg/services/object/search/v2/streamer.go
index 15e2d53d5..93b281343 100644
--- a/pkg/services/object/search/v2/streamer.go
+++ b/pkg/services/object/search/v2/streamer.go
@@ -1,9 +1,9 @@
package searchsvc
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go
index e971fa8e5..48ae98958 100644
--- a/pkg/services/object/search/v2/util.go
+++ b/pkg/services/object/search/v2/util.go
@@ -5,12 +5,12 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index da98ce245..e65293977 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -3,8 +3,8 @@ package object
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// GetObjectStream is an interface of FrostFS API v2 compatible object streamer.
@@ -41,8 +41,8 @@ type PatchObjectStream interface {
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
- Put() (PutObjectStream, error)
- Patch() (PatchObjectStream, error)
+ Put(context.Context) (PutObjectStream, error)
+ Patch(context.Context) (PatchObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 35367aafe..fd8e926dd 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -5,13 +5,11 @@ import (
"crypto/ecdsa"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type SignService struct {
- key *ecdsa.PrivateKey
-
sigSvc *util.SignService
svc ServiceServer
@@ -48,7 +46,6 @@ type getRangeStreamSigner struct {
func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService {
return &SignService{
- key: key,
sigSvc: util.NewUnarySignService(key),
svc: svc,
}
@@ -99,15 +96,16 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- return nil, fmt.Errorf("could not close stream and receive response: %w", err)
+ err = fmt.Errorf("could not close stream and receive response: %w", err)
+ resp = new(object.PutResponse)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Put() (PutObjectStream, error) {
- stream, err := s.svc.Put()
+func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
+ stream, err := s.svc.Put(ctx)
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -135,15 +133,16 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- return nil, fmt.Errorf("could not close stream and receive response: %w", err)
+ err = fmt.Errorf("could not close stream and receive response: %w", err)
+ resp = new(object.PatchResponse)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Patch() (PatchObjectStream, error) {
- stream, err := s.svc.Patch()
+func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ stream, err := s.svc.Patch(ctx)
if err != nil {
- return nil, fmt.Errorf("could not create Put object streamer: %w", err)
+ return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
}
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index e560d6d8c..b446d3605 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -4,8 +4,8 @@ import (
"bytes"
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -87,12 +87,12 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
})
}
-func (c TransportSplitter) Put() (PutObjectStream, error) {
- return c.next.Put()
+func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
+ return c.next.Put(ctx)
}
-func (c TransportSplitter) Patch() (PatchObjectStream, error) {
- return c.next.Patch()
+func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) {
+ return c.next.Patch(ctx)
}
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
@@ -162,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
var newResp *object.SearchResponse
- for ln := uint64(len(ids)); ; {
+ for {
if newResp == nil {
newResp = new(object.SearchResponse)
newResp.SetBody(body)
}
- cut := min(s.addrAmount, ln)
+ cut := min(s.addrAmount, uint64(len(ids)))
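+ // cut is recomputed from the current ids length on every pass, so the
+ // final, possibly shorter batch is sized correctly instead of reusing a
+ // count captured before the loop.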
body.SetIDList(ids[:cut])
newResp.SetMetaHeader(resp.GetMetaHeader())
diff --git a/pkg/services/object/util/key_test.go b/pkg/services/object/util/key_test.go
index cb7ddfde5..1753a26f7 100644
--- a/pkg/services/object/util/key_test.go
+++ b/pkg/services/object/util/key_test.go
@@ -5,10 +5,10 @@ import (
"crypto/elliptic"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
tokenStorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index 92beedaa7..b10826226 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,6 +1,8 @@
package util
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -8,18 +10,10 @@ import (
)
// LogServiceError writes error message of object service to provided logger.
-func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
- l.Error(logs.UtilObjectServiceError,
+func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) {
+ l.Error(ctx, logs.UtilObjectServiceError,
zap.String("node", network.StringifyGroup(node)),
zap.String("request", req),
- zap.String("error", err.Error()),
- )
-}
-
-// LogWorkerPoolError writes debug error message of object worker pool to provided logger.
-func LogWorkerPoolError(l *logger.Logger, req string, err error) {
- l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
- zap.String("request", req),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go
index 1bd39f9ea..f74b0aab9 100644
--- a/pkg/services/object/util/placement.go
+++ b/pkg/services/object/util/placement.go
@@ -1,7 +1,9 @@
package util
import (
+ "context"
"fmt"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -43,8 +45,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu
}
}
-func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(cnr, obj, policy)
+func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -76,8 +78,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac
}
}
-func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(cnr, obj, policy)
+func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -92,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapS
}
if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
- vs[i] = append(vs[i][:j], vs[i][j+1:]...)
+ vs[i] = slices.Delete(vs[i], j, j+1)
j--
}
}
@@ -122,15 +124,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav
// GenerateTraverser generates placement Traverser for provided object address
// using epoch-th network map.
-func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
+func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
// get network map by epoch
- nm, err := g.netMapSrc.GetNetMapByEpoch(epoch)
+ nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
}
// get container related container
- cnr, err := g.cnrSrc.Get(idCnr)
+ cnr, err := g.cnrSrc.Get(ctx, idCnr)
if err != nil {
return nil, nil, fmt.Errorf("could not get container: %w", err)
}
@@ -160,7 +162,7 @@ func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoc
)
}
- t, err := placement.NewTraverser(traverseOpts...)
+ t, err := placement.NewTraverser(ctx, traverseOpts...)
if err != nil {
return nil, nil, err
}
diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go
index 022b9fe5b..34d8ec704 100644
--- a/pkg/services/object/util/prm.go
+++ b/pkg/services/object/util/prm.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strconv"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
@@ -100,11 +100,18 @@ func (p *CommonPrm) SetNetmapLookupDepth(v uint64) {
-// ForgetTokens forgets all the tokens read from the request's
-// meta information before.
+// ForgetTokens forgets all the tokens read from the request's
+// meta information before and returns a function that restores them.
-func (p *CommonPrm) ForgetTokens() {
+func (p *CommonPrm) ForgetTokens() func() {
if p != nil {
+ tk := p.token
+ br := p.bearer
p.token = nil
p.bearer = nil
+ return func() {
+ p.token = tk
+ p.bearer = br
+ }
}
+ return func() {}
}
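+
+// A typical usage sketch (hypothetical): drop the tokens around a nested
+// call and restore them once it completes.
+//
+//	restore := prm.ForgetTokens()
+//	defer restore()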
func CommonPrmFromV2(req interface {
diff --git a/pkg/services/object_manager/placement/cache.go b/pkg/services/object_manager/placement/cache.go
index 217261877..2a8460ca5 100644
--- a/pkg/services/object_manager/placement/cache.go
+++ b/pkg/services/object_manager/placement/cache.go
@@ -3,6 +3,7 @@ package placement
import (
"crypto/sha256"
"fmt"
+ "slices"
"sync"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -44,7 +45,7 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p
raw, ok := c.containerCache.Get(cnr)
c.mtx.Unlock()
if ok {
- return raw, nil
+ return c.cloneResult(raw), nil
}
} else {
c.lastEpoch = nm.Epoch()
@@ -65,5 +66,13 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p
c.containerCache.Add(cnr, cn)
}
c.mtx.Unlock()
- return cn, nil
+ return c.cloneResult(cn), nil
+}
+
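+// cloneResult copies every replica vector so that callers can reorder or
+// trim the returned slices without mutating the cached entry.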
+func (c *ContainerNodesCache) cloneResult(nodes [][]netmapSDK.NodeInfo) [][]netmapSDK.NodeInfo {
+ result := make([][]netmapSDK.NodeInfo, len(nodes))
+ for repIdx := range nodes {
+ result[repIdx] = slices.Clone(nodes[repIdx])
+ }
+ return result
}
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
index a890d5357..7242970b5 100644
--- a/pkg/services/object_manager/placement/cache_test.go
+++ b/pkg/services/object_manager/placement/cache_test.go
@@ -85,7 +85,10 @@ func TestContainerNodesCache(t *testing.T) {
})
t.Run("the error is propagated", func(t *testing.T) {
var pp netmapSDK.PlacementPolicy
- require.NoError(t, pp.DecodeString("REP 1 SELECT 1 FROM X FILTER ATTR EQ 42 AS X"))
+ r := netmapSDK.ReplicaDescriptor{}
+ r.SetNumberOfObjects(1)
+ r.SetSelectorName("Missing")
+ pp.AddReplicas(r)
c := placement.NewContainerNodesCache(size)
_, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go
new file mode 100644
index 000000000..0f24a9d96
--- /dev/null
+++ b/pkg/services/object_manager/placement/metrics.go
@@ -0,0 +1,185 @@
+package placement
+
+import (
+ "errors"
+ "fmt"
+ "maps"
+ "math"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+const (
+ attrPrefix = "$attribute:"
+
+ geoDistance = "$geoDistance"
+)
+
+type Metric interface {
+ CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int
+}
+
+type metricsParser struct {
+ locodeDBPath string
+ locodes map[string]locodedb.Point
+}
+
+type MetricParser interface {
+ ParseMetrics([]string) ([]Metric, error)
+}
+
+func NewMetricsParser(locodeDBPath string) (MetricParser, error) {
+ return &metricsParser{
+ locodeDBPath: locodeDBPath,
+ }, nil
+}
+
+func (p *metricsParser) initLocodes() error {
+ if len(p.locodes) != 0 {
+ return nil
+ }
+ if len(p.locodeDBPath) > 0 {
+ p.locodes = make(map[string]locodedb.Point)
+ locodeDB := locodebolt.New(locodebolt.Prm{
+ Path: p.locodeDBPath,
+ },
+ locodebolt.ReadOnly(),
+ )
+ err := locodeDB.Open()
+ if err != nil {
+ return err
+ }
+ defer locodeDB.Close()
+ err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) {
+ p.locodes[k] = v
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ return errors.New("set path to locode database")
+}
+
+func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) {
+ var metrics []Metric
+ for _, raw := range priority {
+ if attr, found := strings.CutPrefix(raw, attrPrefix); found {
+ metrics = append(metrics, NewAttributeMetric(attr))
+ } else if raw == geoDistance {
+ err := p.initLocodes()
+ if err != nil {
+ return nil, err
+ }
+ if len(p.locodes) == 0 {
+ return nil, fmt.Errorf("provide locodes database for metric %s", raw)
+ }
+ m := NewGeoDistanceMetric(p.locodes)
+ metrics = append(metrics, m)
+ } else {
+ return nil, fmt.Errorf("unsupported priority metric %s", raw)
+ }
+ }
+ return metrics, nil
+}
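+
+// A minimal usage sketch (hypothetical path and priority strings):
+//
+//	parser, _ := NewMetricsParser("/path/to/locode_db")
+//	ms, err := parser.ParseMetrics([]string{"$attribute:ClusterName", "$geoDistance"})
+//	// ms[0] prefers nodes that share the local node's ClusterName value;
+//	// ms[1] ranks nodes by geographic distance from the local node.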
+
+// attributeMetric describes a priority metric based on a node attribute.
+type attributeMetric struct {
+ attribute string
+}
+
+// CalculateValue returns 0 if both from and to carry the attribute
+// attributeMetric.attribute with equal values; otherwise it returns 1.
+func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
+ fromAttr := from.Attribute(am.attribute)
+ toAttr := to.Attribute(am.attribute)
+ if len(fromAttr) > 0 && len(toAttr) > 0 && fromAttr == toAttr {
+ return 0
+ }
+ return 1
+}
+
+func NewAttributeMetric(attr string) Metric {
+ return &attributeMetric{attribute: attr}
+}
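+
+// For example (hypothetical values): with attribute "ClusterName", two nodes
+// both labeled "A" compare as 0 (preferred), while "A" vs "B" compare as 1.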
+
+// geoDistanceMetric describes a priority metric based on the geographic
+// distance between nodes.
+type geoDistanceMetric struct {
+ locodes map[string]locodedb.Point
+ distance *atomic.Pointer[map[string]int]
+ mtx sync.Mutex
+}
+
+func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
+ d := atomic.Pointer[map[string]int]{}
+ m := make(map[string]int)
+ d.Store(&m)
+ gm := &geoDistanceMetric{
+ locodes: locodes,
+ distance: &d,
+ }
+ return gm
+}
+
+// CalculateValue returns the distance in kilometers between the two nodes
+// when coordinates for both locodes are known; otherwise it returns math.MaxInt.
+// Nodes with identical locodes are treated as co-located (distance 0).
+func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
+ fl := from.LOCODE()
+ tl := to.LOCODE()
+ if fl == tl {
+ return 0
+ }
+ m := gm.distance.Load()
+ if v, ok := (*m)[fl+tl]; ok {
+ return v
+ }
+ return gm.calculateDistance(fl, tl)
+}
+
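+// calculateDistance computes and caches the distance for a locode pair.
+// The cache is copy-on-write: CalculateValue reads the map through an atomic
+// pointer without locking, while writers clone the map under the mutex and
+// publish the new version atomically.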
+func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
+ gm.mtx.Lock()
+ defer gm.mtx.Unlock()
+ od := gm.distance.Load()
+ if v, ok := (*od)[from+to]; ok {
+ return v
+ }
+ nd := maps.Clone(*od)
+ var dist int
+ pointFrom, okFrom := gm.locodes[from]
+ pointTo, okTo := gm.locodes[to]
+ if okFrom && okTo {
+ dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
+ } else {
+ dist = math.MaxInt
+ }
+ nd[from+to] = dist
+ gm.distance.Store(&nd)
+
+ return dist
+}
+
+// distance returns the distance in kilometers between two points.
+// Parameters are the latitude and longitude of points 1 and 2 in decimal degrees.
+// The original implementation can be found at https://www.geodatasource.com/developers/go.
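+// As a rough sanity check (hypothetical coordinates): Moscow (55.7558, 37.6173)
+// to Saint Petersburg (59.9311, 30.3609) yields roughly 630 km, in line with
+// the commonly cited great-circle distance.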
+func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
+ radLat1 := math.Pi * lt1 / 180
+ radLat2 := math.Pi * lt2 / 180
+ radTheta := math.Pi * (ln1 - ln2) / 180
+
+ dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
+
+ if dist > 1 {
+ dist = 1
+ }
+
+ dist = math.Acos(dist)
+ dist = dist * 180 / math.Pi
+ dist = dist * 60 * 1.1515 * 1.609344
+
+ return dist
+}
diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go
index 1782e27ea..b3f8d9c03 100644
--- a/pkg/services/object_manager/placement/netmap.go
+++ b/pkg/services/object_manager/placement/netmap.go
@@ -1,6 +1,7 @@
package placement
import (
+ "context"
"crypto/sha256"
"fmt"
@@ -35,12 +36,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
}
}
-func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) {
+func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) {
return s.nm, nil
}
-func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
+func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc)
if err != nil {
return nil, fmt.Errorf("could not get network map: %w", err)
}
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 4e790628f..a3f9af959 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -1,8 +1,10 @@
package placement
import (
+ "context"
"errors"
"fmt"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -20,7 +22,12 @@ type Builder interface {
//
// Must return all container nodes if object identifier
// is nil.
- BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
+ BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
+}
+
+type NodeState interface {
+ // LocalNodeInfo returns the current node state as a FrostFS API v2 NodeInfo structure.
+ LocalNodeInfo() *netmap.NodeInfo
}
// Option represents placement traverser option.
@@ -50,6 +57,10 @@ type cfg struct {
policy netmap.PlacementPolicy
builder Builder
+
+ metrics []Metric
+
+ nodeState NodeState
}
const invalidOptsMsg = "invalid traverser options"
@@ -68,7 +79,7 @@ func defaultCfg() *cfg {
}
// NewTraverser creates, initializes with options and returns Traverser instance.
-func NewTraverser(opts ...Option) (*Traverser, error) {
+func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
cfg := defaultCfg()
for i := range opts {
@@ -88,7 +99,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy)
}
- ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy)
+ ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy)
if err != nil {
return nil, fmt.Errorf("could not build placement: %w", err)
}
@@ -99,7 +110,20 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
}
var rem []int
- if cfg.flatSuccess != nil {
+ if len(cfg.metrics) > 0 && cfg.nodeState != nil {
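+ // Pool the first rem[i] nodes of every replica vector, sort the pool by
+ // the priority metrics relative to the local node, and keep the remaining
+ // nodes in placement order; rem values of -1 disable per-vector copy
+ // tracking for the two resulting vectors.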
+ rem = defaultCopiesVector(cfg.policy)
+ var unsortedVector []netmap.NodeInfo
+ var regularVector []netmap.NodeInfo
+ for i := range rem {
+ pivot := min(len(ns[i]), rem[i])
+ unsortedVector = append(unsortedVector, ns[i][:pivot]...)
+ regularVector = append(regularVector, ns[i][pivot:]...)
+ }
+ rem = []int{-1, -1}
+
+ sortedVector := sortVector(cfg, unsortedVector)
+ ns = [][]netmap.NodeInfo{sortedVector, regularVector}
+ } else if cfg.flatSuccess != nil {
ns = flatNodes(ns)
rem = []int{int(*cfg.flatSuccess)}
} else {
@@ -157,6 +181,35 @@ func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return [][]netmap.NodeInfo{flat}
}
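+// nodeMetrics pairs a node's index in the unsorted vector with the values of
+// all priority metrics calculated for that node against the local node.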
+type nodeMetrics struct {
+ index int
+ metrics []int
+}
+
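+// sortVector reorders nodes by their priority metric values relative to the
+// local node: metric slices are compared lexicographically, and the stable
+// sort preserves the original placement order among equal nodes.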
+func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo {
+ nm := make([]nodeMetrics, len(unsortedVector))
+ node := cfg.nodeState.LocalNodeInfo()
+
+ for i := range unsortedVector {
+ m := make([]int, len(cfg.metrics))
+ for j, pm := range cfg.metrics {
+ m[j] = pm.CalculateValue(node, &unsortedVector[i])
+ }
+ nm[i] = nodeMetrics{
+ index: i,
+ metrics: m,
+ }
+ }
+ slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
+ return slices.Compare(a.metrics, b.metrics)
+ })
+ sortedVector := make([]netmap.NodeInfo, len(unsortedVector))
+ for i := range unsortedVector {
+ sortedVector[i] = unsortedVector[nm[i].index]
+ }
+ return sortedVector
+}
+
// Node is a descriptor of storage node with information required for intra-container communication.
type Node struct {
addresses network.AddressGroup
@@ -235,8 +288,8 @@ func (t *Traverser) Next() []Node {
func (t *Traverser) skipEmptyVectors() {
for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
- t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
- t.rem = append(t.rem[:i], t.rem[i+1:]...)
+ t.vectors = slices.Delete(t.vectors, i, i+1)
+ t.rem = slices.Delete(t.rem, i, i+1)
i--
} else {
break
@@ -322,3 +375,17 @@ func WithCopyNumbers(v []uint32) Option {
c.copyNumbers = v
}
}
+
+// WithPriorityMetrics uses the provided priority metrics to sort nodes.
+func WithPriorityMetrics(m []Metric) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
+
+// WithNodeState provides the state of the current node.
+func WithNodeState(s NodeState) Option {
+ return func(c *cfg) {
+ c.nodeState = s
+ }
+}
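+
+// A minimal wiring sketch (hypothetical cnr, builder and localState; see
+// traverser_test.go for complete examples):
+//
+//	m := []Metric{NewAttributeMetric("ClusterName")}
+//	tr, err := NewTraverser(ctx,
+//		ForContainer(cnr),
+//		UseBuilder(builder),
+//		WithPriorityMetrics(m),
+//		WithNodeState(localState),
+//	)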
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index b3b57677d..d1370f21e 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -1,6 +1,8 @@
package placement
import (
+ "context"
+ "slices"
"strconv"
"testing"
@@ -17,12 +19,14 @@ type testBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return b.vectors, nil
}
func testNode(v uint32) (n netmap.NodeInfo) {
- n.SetNetworkEndpoints("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)))
+ ip := "/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v))
+ n.SetNetworkEndpoints(ip)
+ n.SetPublicKey([]byte(ip))
return n
}
@@ -31,8 +35,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
vc := make([][]netmap.NodeInfo, 0, len(v))
for i := range v {
- ns := make([]netmap.NodeInfo, len(v[i]))
- copy(ns, v[i])
+ ns := slices.Clone(v[i])
vc = append(vc, ns)
}
@@ -40,7 +43,15 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return vc
}
-func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
+func testPlacement(ss []int, rs []int) ([][]netmap.NodeInfo, container.Container) {
+ return placement(ss, rs, nil)
+}
+
+func testECPlacement(ss []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
+ return placement(ss, nil, ec)
+}
+
+func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
nodes := make([][]netmap.NodeInfo, 0, len(rs))
replicas := make([]netmap.ReplicaDescriptor, 0, len(rs))
num := uint32(0)
@@ -56,7 +67,12 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
nodes = append(nodes, ns)
var rd netmap.ReplicaDescriptor
- rd.SetNumberOfObjects(uint32(rs[i]))
+ if len(rs) > 0 {
+ rd.SetNumberOfObjects(uint32(rs[i]))
+ } else {
+ rd.SetECDataCount(uint32(ec[i][0]))
+ rd.SetECParityCount(uint32(ec[i][1]))
+ }
replicas = append(replicas, rd)
}
@@ -87,7 +103,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithoutSuccessTracking(),
@@ -116,7 +132,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -134,7 +150,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
err = n.FromIterator(netmapcore.Node(nodes[1][0]))
require.NoError(t, err)
- require.Equal(t, []Node{{addresses: n}}, tr.Next())
+ require.Equal(t, []Node{{addresses: n, key: []byte("/ip4/0.0.0.0/tcp/5")}}, tr.Next())
})
t.Run("put scenario", func(t *testing.T) {
@@ -145,7 +161,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
)
@@ -186,7 +202,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodes, cnr := testPlacement(selectors, replicas)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local)
@@ -261,7 +277,7 @@ func TestTraverserRemValues(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithCopyNumbers(testCase.copyNumbers),
@@ -275,3 +291,363 @@ func TestTraverserRemValues(t *testing.T) {
})
}
}
+
+type nodeState struct {
+ node *netmap.NodeInfo
+}
+
+func (n *nodeState) LocalNodeInfo() *netmap.NodeInfo {
+ return n.node
+}
+
+func TestTraverserPriorityMetrics(t *testing.T) {
+ t.Run("one rep one metric", func(t *testing.T) {
+ selectors := []int{4}
+ replicas := []int{3}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "A")
+ // Node_2, PK - ip4/0.0.0.0/tcp/2
+ nodes[0][2].SetAttribute("ClusterName", "B")
+ // Node_3, PK - ip4/0.0.0.0/tcp/3
+ nodes[0][3].SetAttribute("ClusterName", "B")
+
+ sdkNode := testNode(5)
+ sdkNode.SetAttribute("ClusterName", "B")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{NewAttributeMetric("ClusterName")}
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `ClusterName` the order will be:
+ // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
+ // With priority metric `ClusterName` and current node in cluster B
+ // the order should be:
+ // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 3, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
+
+ next = tr.Next()
+ // The last node is Node_3 from cluster B.
+ require.Equal(t, 1, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("one rep one metric fewer nodes", func(t *testing.T) {
+ selectors := []int{2}
+ replicas := []int{3}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "B")
+
+ sdkNode := testNode(5)
+ sdkNode.SetAttribute("ClusterName", "B")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{NewAttributeMetric("ClusterName")}
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `ClusterName` the order will be:
+ // [ {Node_0 A}, {Node_1 B} ]
+ // With priority metric `ClusterName` and current node in cluster B
+ // the order should be:
+ // [ {Node_1 B}, {Node_0 A} ]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("two reps two metrics", func(t *testing.T) {
+ selectors := []int{3, 3}
+ replicas := []int{2, 2}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // REPLICA #1
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ nodes[0][0].SetAttribute("UN-LOCODE", "RU LED")
+
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "A")
+ nodes[0][1].SetAttribute("UN-LOCODE", "FI HEL")
+
+ // Node_2, PK - ip4/0.0.0.0/tcp/2
+ nodes[0][2].SetAttribute("ClusterName", "A")
+ nodes[0][2].SetAttribute("UN-LOCODE", "RU LED")
+
+ // REPLICA #2
+ // Node_3, PK - ip4/0.0.0.0/tcp/3
+ nodes[1][0].SetAttribute("ClusterName", "B")
+ nodes[1][0].SetAttribute("UN-LOCODE", "RU MOW")
+
+ // Node_4, PK - ip4/0.0.0.0/tcp/4
+ nodes[1][1].SetAttribute("ClusterName", "B")
+ nodes[1][1].SetAttribute("UN-LOCODE", "RU DME")
+
+ // Node_5, PK - ip4/0.0.0.0/tcp/5
+ nodes[1][2].SetAttribute("ClusterName", "B")
+ nodes[1][2].SetAttribute("UN-LOCODE", "RU MOW")
+
+ sdkNode := testNode(9)
+ sdkNode.SetAttribute("ClusterName", "B")
+ sdkNode.SetAttribute("UN-LOCODE", "RU DME")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{
+ NewAttributeMetric("ClusterName"),
+ NewAttributeMetric("UN-LOCODE"),
+ }
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Check that nodes in the same cluster and the same location
+ // come first in the slice. Nodes that match the criteria but
+ // fall outside the replica should land in the next slice.
+
+ next := tr.Next()
+ require.Equal(t, 4, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
+
+ next = tr.Next()
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+
+ sdkNode.SetAttribute("ClusterName", "B")
+ sdkNode.SetAttribute("UN-LOCODE", "RU MOW")
+
+ nodesCopy = copyVectors(nodes)
+
+ tr, err = NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ next = tr.Next()
+ require.Equal(t, 4, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
+
+ next = tr.Next()
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+
+ sdkNode.SetAttribute("ClusterName", "A")
+ sdkNode.SetAttribute("UN-LOCODE", "RU LED")
+
+ nodesCopy = copyVectors(nodes)
+
+ tr, err = NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ next = tr.Next()
+ require.Equal(t, 4, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[2].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[3].PublicKey()))
+
+ next = tr.Next()
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("ec container", func(t *testing.T) {
+ selectors := []int{4}
+ ec := [][]int{{2, 1}}
+
+ nodes, cnr := testECPlacement(selectors, ec)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "A")
+ // Node_2, PK - ip4/0.0.0.0/tcp/2
+ nodes[0][2].SetAttribute("ClusterName", "B")
+ // Node_3, PK - ip4/0.0.0.0/tcp/3
+ nodes[0][3].SetAttribute("ClusterName", "B")
+
+ sdkNode := testNode(5)
+ sdkNode.SetAttribute("ClusterName", "B")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{NewAttributeMetric("ClusterName")}
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `ClusterName` the order will be:
+ // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
+ // With priority metric `ClusterName` and current node in cluster B
+ // the order should be:
+ // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 3, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
+
+ next = tr.Next()
+ // The last node is Node_3 from cluster B.
+ require.Equal(t, 1, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("one rep one geo metric", func(t *testing.T) {
+ t.Skip("requires a real locode database")
+ selectors := []int{2}
+ replicas := []int{2}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
+
+ sdkNode := testNode(2)
+ sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
+
+ nodesCopy := copyVectors(nodes)
+
+ parser, err := NewMetricsParser("/path/to/locode_db")
+ require.NoError(t, err)
+ m, err := parser.ParseMetrics([]string{geoDistance})
+ require.NoError(t, err)
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `$geoDistance` the order will be:
+ // [ {Node_0 RU MOW}, {Node_1 RU LED}]
+ // With priority metric `$geoDistance` the order should be:
+ // [ {Node_1 RU LED}, {Node_0 RU MOW}]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+}
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index c3c810001..e5f001d5a 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -4,9 +4,9 @@ import (
"context"
"strconv"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
@@ -57,14 +57,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
ts, err := g.tsSource.Tombstone(ctx, a, epoch)
if err != nil {
- log.Warn(
+ log.Warn(ctx,
logs.TombstoneCouldNotGetTheTombstoneTheSource,
zap.Error(err),
)
- } else {
- if ts != nil {
- return g.handleTS(addrStr, ts, epoch)
- }
+ } else if ts != nil {
+ return g.handleTS(ctx, addrStr, ts, epoch)
}
// requested tombstone not found.
@@ -72,12 +70,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
return false
}
-func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
+func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
- if atr.Key() == objectV2.SysAttributeExpEpoch || atr.Key() == objectV2.SysAttributeExpEpochNeoFS {
+ if atr.Key() == objectV2.SysAttributeExpEpoch {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
- g.log.Warn(
+ g.log.Warn(ctx,
logs.TombstoneExpirationParseFailure,
zap.Error(err),
)
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 9d33e8179..2147a32fe 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -3,6 +3,7 @@ package tombstone
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
@@ -23,7 +24,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- log: &logger.Logger{Logger: zap.NewNop()},
+ log: logger.NewLoggerWrapper(zap.NewNop()),
cacheSize: defaultLRUCacheSize,
}
}
@@ -49,9 +50,7 @@ func NewChecker(oo ...Option) *ExpirationChecker {
panicOnNil(cfg.tsSource, "Tombstone source")
cache, err := lru.New[string, uint64](cfg.cacheSize)
- if err != nil {
- panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
- }
+ assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize))
return &ExpirationChecker{
cache: cache,
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 1ff07b05a..975941847 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -38,9 +39,7 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
// Panics if any of the provided options does not allow
// constructing a valid tombstone local Source.
func NewSource(p TombstoneSourcePrm) Source {
- if p.s == nil {
- panic("Tombstone source: nil object service")
- }
+ assert.False(p.s == nil, "Tombstone source: nil object service")
return Source(p)
}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index bf67ec4d4..dcaaec0b4 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -9,18 +9,29 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
policycore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) error {
- cnr, err := p.cnrSrc.Get(objInfo.Address.Container())
+ ctx, span := tracing.StartSpanFromContext(ctx, "Policer.ProcessObject", trace.WithAttributes(
+ attribute.String("address", objInfo.Address.String()),
+ attribute.Bool("is_linking_object", objInfo.IsLinkingObject),
+ attribute.Bool("is_ec_part", objInfo.ECInfo != nil),
+ attribute.String("type", objInfo.Type.String()),
+ ))
+ defer span.End()
+
+ cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container())
if err != nil {
if client.IsErrContainerNotFound(err) {
- existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container())
+ existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container())
if errWasRemoved != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved)
} else if existed {
@@ -37,7 +48,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
policy := cnr.Value.PlacementPolicy()
if policycore.IsECPlacement(policy) {
- return p.processECContainerObject(ctx, objInfo, policy)
+ return p.processECContainerObject(ctx, objInfo, cnr.Value)
}
return p.processRepContainerObject(ctx, objInfo, policy)
}
@@ -45,7 +56,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
idObj := objInfo.Address.Object()
idCnr := objInfo.Address.Container()
- nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
+ nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -75,7 +86,7 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc
}
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -99,6 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
// Number of copies that are stored on maintenance nodes.
var uncheckedCopies int
+ var candidates []netmap.NodeInfo
for i := 0; shortage > 0 && i < len(nodes); i++ {
select {
case <-ctx.Done():
@@ -106,71 +118,68 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
default:
}
- if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
- requirements.needLocalCopy = true
-
- shortage--
- } else if nodes[i].IsMaintenance() {
- shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
- } else {
- if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
- if status == nodeHoldsObject {
- // node already contains replica, no need to replicate
- nodes = append(nodes[:i], nodes[i+1:]...)
- i--
- shortage--
- }
-
+ var err error
+ st := checkedNodes.processStatus(nodes[i])
+ if !st.Processed() {
+ st, err = p.checkStatus(ctx, addr, nodes[i])
+ checkedNodes.set(nodes[i], st)
+ if st == nodeDoesNotHoldObject {
+ // 1. This is the first time the node is encountered (`!st.Processed()`).
+ // 2. The node does not hold the object (`st == nodeDoesNotHoldObject`).
+ // So we should try to replicate the object to it.
+ candidates = append(candidates, nodes[i])
continue
}
-
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
-
- _, err := p.remoteHeader(callCtx, nodes[i], addr, false)
-
- cancel()
-
- if err == nil {
- shortage--
- checkedNodes.submitReplicaHolder(nodes[i])
- } else {
- if client.IsErrObjectNotFound(err) {
- checkedNodes.submitReplicaCandidate(nodes[i])
- continue
- } else if client.IsErrNodeUnderMaintenance(err) {
- shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
- } else {
- p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
- zap.Stringer("object", addr),
- zap.String("error", err.Error()),
- )
- }
- }
}
- nodes = append(nodes[:i], nodes[i+1:]...)
- i--
+ switch st {
+ case nodeIsLocal:
+ requirements.needLocalCopy = true
+
+ shortage--
+ case nodeIsUnderMaintenance:
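+ // Remote nodes under maintenance are treated as holders: such nodes
+ // MAY not respond with the object, but counting them prevents spam
+ // with new replicas. Extra copies are still not removed in this case,
+ // because that could delete the only real copy (see uncheckedCopies).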
+ shortage--
+ uncheckedCopies++
+
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+ zap.String("node", netmap.StringifyPublicKey(nodes[i])))
+ case nodeHoldsObject:
+ shortage--
+ case nodeDoesNotHoldObject:
+ case nodeStatusUnknown:
+ p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
+ zap.Stringer("object", addr),
+ zap.Error(err))
+ default:
+ panic("unreachable")
+ }
}
- p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
+ p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies)
}
-// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values
-//
-// consider remote nodes under maintenance as problem OK. Such
-// nodes MAY not respond with object, however, this is how we
-// prevent spam with new replicas.
-// However, additional copies should not be removed in this case,
-// because we can remove the only copy this way.
-func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
- checkedNodes.submitReplicaHolder(node)
- shortage--
- uncheckedCopies++
+func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) {
+ if p.netmapKeys.IsLocalKey(node.PublicKey()) {
+ return nodeIsLocal, nil
+ }
+ if node.Status().IsMaintenance() {
+ return nodeIsUnderMaintenance, nil
+ }
- p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
- zap.String("node", netmap.StringifyPublicKey(node)),
- )
- return shortage, uncheckedCopies
+ callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
+ _, err := p.remoteHeader(callCtx, node, addr, false)
+ cancel()
+
+ if err == nil {
+ return nodeHoldsObject, nil
+ }
+ if client.IsErrObjectNotFound(err) {
+ return nodeDoesNotHoldObject, nil
+ }
+ if client.IsErrNodeUnderMaintenance(err) {
+ return nodeIsUnderMaintenance, nil
+ }
+ return nodeStatusUnknown, err
}
func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
@@ -178,7 +187,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
) {
switch {
case shortage > 0:
- p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
+ p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected,
zap.Stringer("object", addr),
zap.Uint32("shortage", shortage),
)
@@ -194,7 +203,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
case uncheckedCopies > 0:
// If we have more copies than needed, but some of them are from the maintenance nodes,
// save the local copy.
- p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
+ p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
zap.Int("count", uncheckedCopies))
case uncheckedCopies == 0:
diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go
index d4c7ccbf9..69879c439 100644
--- a/pkg/services/policer/check_test.go
+++ b/pkg/services/policer/check_test.go
@@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) {
cache.SubmitSuccessfulReplication(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
- cache.submitReplicaCandidate(node)
+ cache.set(node, nodeDoesNotHoldObject)
require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject)
- cache.submitReplicaHolder(node)
+ cache.set(node, nodeHoldsObject)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 61a65fc21..fbdeb3148 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -10,6 +10,7 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -27,18 +28,18 @@ type ecChunkProcessResult struct {
var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node")
-func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
+func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
if objInfo.ECInfo == nil {
- return p.processECContainerRepObject(ctx, objInfo, policy)
+ return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy())
}
- return p.processECContainerECObject(ctx, objInfo, policy)
+ return p.processECContainerECObject(ctx, objInfo, cnr)
}
// processECContainerRepObject processes non erasure coded objects in EC container: tombstones, locks and linking objects.
// All of them must be stored on all of the container nodes.
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
objID := objInfo.Address.Object()
- nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy)
+ nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -58,7 +59,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -67,8 +68,8 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
return nil
}
-func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
- nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, policy)
+func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
+ nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -85,12 +86,12 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
res := p.processECChunk(ctx, objInfo, nn[0])
if !res.validPlacement {
// drop local chunk only if all required chunks are in place
- res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0])
+ res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr)
}
- p.adjustECPlacement(ctx, objInfo, nn[0], policy)
+ p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
if res.removeLocal {
- p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
p.cbRedundantCopy(ctx, objInfo.Address)
}
return nil
@@ -100,15 +101,15 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
var removeLocalChunk bool
requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
- if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
// current node is required node, we are happy
return ecChunkProcessResult{
validPlacement: true,
}
}
- if requiredNode.IsMaintenance() {
+ if requiredNode.Status().IsMaintenance() {
// consider maintenance mode has object, but do not drop local copy
- p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
return ecChunkProcessResult{}
}
@@ -119,7 +120,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
if err == nil {
removeLocalChunk = true
} else if client.IsErrObjectNotFound(err) {
- p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
+ p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
task := replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -128,9 +129,9 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
} else if client.IsErrNodeUnderMaintenance(err) {
// consider maintenance mode has object, but do not drop local copy
- p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
} else {
- p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
+ p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
}
return ecChunkProcessResult{
@@ -138,20 +139,20 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
}
}
-func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) bool {
+func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
if len(requiredChunkIndexes) == 0 {
- p.log.Info(logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
+ p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
return true
}
err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
if err != nil {
- p.log.Error(logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if len(requiredChunkIndexes) == 0 {
@@ -169,8 +170,9 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I
addr.SetContainer(objInfo.Address.Container())
addr.SetObject(indexToObjectID[index])
p.replicator.HandlePullTask(ctx, replicator.Task{
- Addr: addr,
- Nodes: candidates,
+ Addr: addr,
+ Nodes: candidates,
+ Container: cnr,
})
}
// there was some missing chunks, it's not ok
@@ -183,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec
if uint32(i) == objInfo.ECInfo.Total {
break
}
- if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
}
}
@@ -208,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad
func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
var eiErr *objectSDK.ECInfoError
for _, n := range nodes {
- if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
continue
}
_, err := p.remoteHeader(ctx, n, parentAddress, true)
@@ -222,11 +224,11 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
var chunkID oid.ID
if err := chunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
- p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
+ p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return false
}
@@ -237,7 +239,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
for index, candidates := range required {
if len(candidates) == 0 {
- p.log.Error(logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
+ p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
return false
}
}
@@ -245,7 +247,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
return true
}
-func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, policy netmap.PlacementPolicy) {
+func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
@@ -258,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
return
}
var err error
- if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
_, err = p.localHeader(ctx, parentAddress)
} else {
_, err = p.remoteHeader(ctx, n, parentAddress, true)
@@ -269,18 +271,20 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
resolved[ch.Index] = append(resolved[ch.Index], n)
var ecInfoChunkID oid.ID
if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return
}
if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
- p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
+ p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return
}
chunkIDs[ch.Index] = ecInfoChunkID
}
- } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
- p.log.Warn(logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
+ } else if client.IsErrObjectAlreadyRemoved(err) {
+ restore = false
+ } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+ p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
p.replicator.HandleReplicationTask(ctx, replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -292,21 +296,23 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total {
return
}
- if objInfo.ECInfo.Total-uint32(len(resolved)) > policy.ReplicaDescriptor(0).GetECParityCount() {
+ if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() {
var found []uint32
for i := range resolved {
found = append(found, i)
}
- p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
+ p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
return
}
- p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, policy)
+ p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
}
-func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, policy netmap.PlacementPolicy) {
- c, err := erasurecode.NewConstructor(int(policy.ReplicaDescriptor(0).GetECDataCount()), int(policy.ReplicaDescriptor(0).GetECParityCount()))
+func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID,
+ cnr containerSDK.Container,
+) {
+ c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
if err != nil {
- p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
@@ -315,7 +321,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
key, err := p.keyStorage.GetKey(nil)
if err != nil {
- p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
required := make([]bool, len(parts))
@@ -325,7 +331,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
}
if err := c.ReconstructParts(parts, required, key); err != nil {
- p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
for idx, part := range parts {
@@ -337,10 +343,11 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
pID, _ := part.ID()
addr.SetObject(pID)
targetNode := nodes[idx%len(nodes)]
- if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
p.replicator.HandleLocalPutTask(ctx, replicator.Task{
- Addr: addr,
- Obj: part,
+ Addr: addr,
+ Obj: part,
+ Container: cnr,
})
} else {
p.replicator.HandleReplicationTask(ctx, replicator.Task{
@@ -364,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
var obj *objectSDK.Object
var err error
for _, node := range nodes {
- if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+ if p.netmapKeys.IsLocalKey(node.PublicKey()) {
obj, err = p.localObject(egCtx, objID)
} else {
obj, err = p.remoteObject(egCtx, node, objID)
@@ -372,7 +379,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
if err == nil {
break
}
- p.log.Warn(logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
+ p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
}
if obj != nil {
parts[idx] = obj
@@ -381,7 +388,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
})
}
if err := errGroup.Wait(); err != nil {
- p.log.Error(logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
return nil
}
return parts
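
The hunks above thread the container through the EC restore path so data/parity counts come from `cnr.PlacementPolicy()` instead of a separately passed policy. A minimal sketch of the reconstruction call pattern, using only the `erasurecode` calls visible in this diff (the helper name and exact import path are assumptions):

package policer

import (
	"crypto/ecdsa"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
)

// reconstructMissing is a hypothetical helper mirroring restoreECObject:
// nil entries in parts are chunks no holder could serve and must be rebuilt.
func reconstructMissing(parts []*objectSDK.Object, dataCount, parityCount int, key *ecdsa.PrivateKey) error {
	c, err := erasurecode.NewConstructor(dataCount, parityCount)
	if err != nil {
		return err
	}
	required := make([]bool, len(parts))
	for i, p := range parts {
		required[i] = p == nil
	}
	// ReconstructParts fills the nil slots in place using the surviving chunks.
	return c.ReconstructParts(parts, required, key)
}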
diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go
index e230153f9..c6980536b 100644
--- a/pkg/services/policer/ec_test.go
+++ b/pkg/services/policer/ec_test.go
@@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
@@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go
index cd47cb0fc..c2157de5d 100644
--- a/pkg/services/policer/nodecache.go
+++ b/pkg/services/policer/nodecache.go
@@ -8,6 +8,9 @@ const (
nodeNotProcessed nodeProcessStatus = iota
nodeDoesNotHoldObject
nodeHoldsObject
+ nodeStatusUnknown
+ nodeIsUnderMaintenance
+ nodeIsLocal
)
func (st nodeProcessStatus) Processed() bool {
@@ -15,37 +18,19 @@ func (st nodeProcessStatus) Processed() bool {
}
// nodeCache tracks Policer's check progress.
-type nodeCache map[uint64]bool
+type nodeCache map[uint64]nodeProcessStatus
func newNodeCache() nodeCache {
- return make(map[uint64]bool)
+ return make(map[uint64]nodeProcessStatus)
}
-func (n nodeCache) set(node netmap.NodeInfo, val bool) {
+func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
n[node.Hash()] = val
}
-// submits storage node as a candidate to store the object replica in case of
-// shortage.
-func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
- n.set(node, false)
-}
-
-// submits storage node as a current object replica holder.
-func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
- n.set(node, true)
-}
-
// processStatus returns current processing status of the storage node.
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
- switch val, ok := n[node.Hash()]; {
- case !ok:
- return nodeNotProcessed
- case val:
- return nodeHoldsObject
- default:
- return nodeDoesNotHoldObject
- }
+ return n[node.Hash()]
}
// SubmitSuccessfulReplication marks given storage node as a current object
@@ -53,5 +38,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
//
// SubmitSuccessfulReplication implements replicator.TaskResult.
func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
- n.submitReplicaHolder(node)
+ n.set(node, nodeHoldsObject)
}
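
Worth noting why `processStatus` could collapse to a single map read: `nodeNotProcessed` is the zero value of `nodeProcessStatus`, so looking up an unseen node hash already yields the right answer. A trimmed, self-contained sketch of the idea:

package policer

type nodeProcessStatus int

const (
	nodeNotProcessed nodeProcessStatus = iota // zero value: never checked
	nodeDoesNotHoldObject
	nodeHoldsObject
)

type nodeCache map[uint64]nodeProcessStatus

func (n nodeCache) processStatus(hash uint64) nodeProcessStatus {
	return n[hash] // missing key -> zero value -> nodeNotProcessed
}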
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 9dbfd8b9f..5d59604c2 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -91,7 +91,7 @@ type cfg struct {
func defaultCfg() *cfg {
return &cfg{
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
batchSize: 10,
cacheSize: 1024, // 1024 * address size = 1024 * 64 = 64 MiB
sleepDuration: 1 * time.Second,
@@ -143,7 +143,7 @@ func WithPlacementBuilder(v placement.Builder) Option {
}
}
-// WithRemoteObjectHeader returns option to set remote object header receiver of Policer.
+// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer.
func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option {
return func(c *cfg) {
c.remoteHeader = v
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index 363c0b922..c91e7cc7c 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,13 +1,13 @@
package policer
import (
+ "fmt"
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
- "go.uber.org/zap"
)
type objectsInWork struct {
@@ -55,12 +55,8 @@ func New(opts ...Option) *Policer {
opts[i](c)
}
- c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
-
cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
- if err != nil {
- panic(err)
- }
+ assert.NoError(err, fmt.Sprintf("could not create LRU cache of size %d", c.cacheSize))
return &Policer{
cfg: c,
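
The constructor now funnels the must-not-fail case through `internal/assert` instead of an inline panic. Only the call site is in the hunk above; the body below is a guess at what such a helper looks like:

package assert

import "fmt"

// NoError panics with the given message when err is non-nil.
// Hypothetical body; the real internal/assert may differ.
func NoError(err error, msg string) {
	if err != nil {
		panic(fmt.Sprintf("%s: %v", msg, err))
	}
}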
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index e353ea428..049c33753 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
+ "slices"
"sort"
"testing"
"time"
@@ -36,10 +37,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
// Container source and bury function
buryCh := make(chan oid.Address)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -78,6 +79,7 @@ func TestProcessObject(t *testing.T) {
maintenanceNodes []int
wantRemoveRedundant bool
wantReplicateTo []int
+ headResult map[int]error
ecInfo *objectcore.ECInfo
}{
{
@@ -127,7 +129,7 @@ func TestProcessObject(t *testing.T) {
nodeCount: 2,
policy: `REP 2 REP 2`,
placement: [][]int{{0, 1}, {0, 1}},
- wantReplicateTo: []int{1, 1}, // is this actually good?
+ wantReplicateTo: []int{1},
},
{
desc: "lock object must be replicated to all nodes",
@@ -145,6 +147,14 @@ func TestProcessObject(t *testing.T) {
objHolders: []int{1},
maintenanceNodes: []int{2},
},
+ {
+ desc: "preserve local copy when node response with MAINTENANCE",
+ nodeCount: 3,
+ policy: `REP 2`,
+ placement: [][]int{{1, 2}},
+ objHolders: []int{1},
+ headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
+ },
{
desc: "lock object must be replicated to all EC nodes",
objType: objectSDK.TypeLock,
@@ -161,6 +171,14 @@ func TestProcessObject(t *testing.T) {
placement: [][]int{{0, 1, 2}},
wantReplicateTo: []int{1, 2},
},
+ {
+ desc: "do not remove local copy when MAINTENANCE status is cached",
+ objType: objectSDK.TypeRegular,
+ nodeCount: 3,
+ policy: `REP 1 REP 1`,
+ placement: [][]int{{1, 2}, {1, 0}},
+ headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
+ },
}
for i := range tests {
@@ -174,7 +192,7 @@ func TestProcessObject(t *testing.T) {
nodes[i].SetPublicKey([]byte{byte(i)})
}
for _, i := range ti.maintenanceNodes {
- nodes[i].SetMaintenance()
+ nodes[i].SetStatus(netmap.Maintenance)
}
var policy netmap.PlacementPolicy
@@ -204,11 +222,14 @@ func TestProcessObject(t *testing.T) {
t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
return nil, errors.New("unexpected object head")
}
- for _, i := range ti.objHolders {
- if index == i {
- return nil, nil
+ if ti.headResult != nil {
+ if err, ok := ti.headResult[index]; ok {
+ return nil, err
}
}
+ if slices.Contains(ti.objHolders, index) {
+ return nil, nil
+ }
return nil, new(apistatus.ObjectNotFound)
}
@@ -217,14 +238,14 @@ func TestProcessObject(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
if id.Equals(addr.Container()) {
return cnr, nil
}
t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container())
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -282,10 +303,10 @@ func TestProcessObjectError(t *testing.T) {
cnr := &container.Container{}
cnr.Value.Init()
source := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return nil, new(apistatus.ContainerNotFound)
},
}
@@ -330,10 +351,10 @@ func TestIteratorContract(t *testing.T) {
}
containerSrc := containerSrc{
- get: func(id cid.ID) (*container.Container, error) {
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -422,18 +443,22 @@ func (it *sliceKeySpaceIterator) Rewind() {
}
type containerSrc struct {
- get func(id cid.ID) (*container.Container, error)
- deletionInfo func(id cid.ID) (*container.DelInfo, error)
+ get func(ctx context.Context, id cid.ID) (*container.Container, error)
+ deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error)
}
-func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) }
+func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return f.get(ctx, id)
+}
-func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) }
+func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return f.deletionInfo(ctx, id)
+}
// placementBuilderFunc is a placement.Builder backed by a function
type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return f(c, o, p)
}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index a5ebb0010..635a5683b 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -7,17 +7,20 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (p *Policer) Run(ctx context.Context) {
p.shardPolicyWorker(ctx)
- p.log.Info(logs.PolicerRoutineStopped)
+ p.log.Info(ctx, logs.PolicerRoutineStopped)
}
func (p *Policer) shardPolicyWorker(ctx context.Context) {
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
for {
select {
case <-ctx.Done():
@@ -33,7 +36,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
continue
}
- p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
+ p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
}
skipMap := newSkipMap()
@@ -59,9 +62,9 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
if p.objsInWork.add(addr.Address) {
err := p.processObject(ctx, addr)
if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
- p.log.Error(logs.PolicerUnableToProcessObj,
+ p.log.Error(ctx, logs.PolicerUnableToProcessObj,
zap.Stringer("object", addr.Address),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
p.cache.Add(addr.Address, time.Now())
p.objsInWork.remove(addr.Address)
@@ -69,7 +72,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
}
})
if err != nil {
- p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
+ p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
}
}
}
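
`shardPolicyWorker` now stamps its context with a QoS IO tag once at the top, so every storage and network call it spawns is attributed to the policer. A minimal sketch of the same pattern, assuming only `tagging.ContextWithIOTag` and `qos.IOTagPolicer` from the hunk above:

package policer

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
)

// runTagged tags the context once at the worker entry point; everything
// invoked with ctx downstream inherits the policer IO tag.
func runTagged(ctx context.Context, work func(context.Context)) {
	ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
	work(ctx)
}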
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 7e5c6e093..8c6f0df06 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
@@ -27,7 +26,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(logs.ReplicatorFinishWork,
+ p.log.Debug(ctx, logs.ReplicatorFinishWork,
zap.Uint32("amount of unfinished replicas", task.NumCopies),
)
}()
@@ -43,10 +42,9 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
var err error
task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
if err != nil {
- p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
return
}
@@ -65,7 +63,6 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
log := p.log.With(
zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])),
zap.Stringer("object", task.Addr),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)
@@ -75,11 +72,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
cancel()
if err != nil {
- log.Error(logs.ReplicatorCouldNotReplicateObject,
- zap.String("error", err.Error()),
+ log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
+ zap.Error(err),
)
} else {
- log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
+ log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
task.NumCopies--
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index d178700f6..216fe4919 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -3,11 +3,12 @@ package replicator
import (
"context"
"errors"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -21,7 +22,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask",
@@ -42,31 +43,24 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
if err == nil {
break
}
- var endpoints []string
- node.IterateNetworkEndpoints(func(s string) bool {
- endpoints = append(endpoints, s)
- return false
- })
- p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ endpoints := slices.Collect(node.NetworkEndpoints())
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
zap.Error(err),
- zap.Strings("endpoints", endpoints),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Strings("endpoints", endpoints))
}
if obj == nil {
- p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errFailedToGetObjectFromAnyNode),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(errFailedToGetObjectFromAnyNode))
return
}
- err := engine.Put(ctx, p.localStorage, obj)
+ err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
- p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
}
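
The endpoint collection above switches from the callback-style `IterateNetworkEndpoints` to a Go 1.23 range-over-func iterator drained by `slices.Collect`. A self-contained illustration of that idiom (the endpoint values are made up):

package main

import (
	"fmt"
	"iter"
	"slices"
)

// endpoints yields values the way NodeInfo.NetworkEndpoints now does:
// an iter.Seq[string] instead of a method taking a callback.
func endpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, e := range []string{"grpc://10.0.0.1:8080", "grpcs://10.0.0.2:8082"} {
			if !yield(e) {
				return
			}
		}
	}
}

func main() {
	eps := slices.Collect(endpoints()) // replaces the manual append loop
	fmt.Println(eps)
}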
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index c06ec3f65..bcad8471d 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -5,8 +5,8 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -19,7 +19,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "local put"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask",
@@ -30,18 +30,16 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
defer span.End()
if task.Obj == nil {
- p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errObjectNotDefined),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(errObjectNotDefined))
return
}
- err := engine.Put(ctx, p.localStorage, task.Obj)
+ err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
- p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.Error(err))
}
}
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index f2f86daf0..a940cef37 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -7,7 +7,6 @@ import (
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
// Replicator represents the utility that replicates
@@ -45,8 +44,6 @@ func New(opts ...Option) *Replicator {
opts[i](c)
}
- c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))}
-
return &Replicator{
cfg: c,
}
diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go
index d2b5b2506..a03f8dcaa 100644
--- a/pkg/services/replicator/task.go
+++ b/pkg/services/replicator/task.go
@@ -1,6 +1,7 @@
package replicator
import (
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -16,4 +17,6 @@ type Task struct {
Obj *objectSDK.Object
// Nodes is a list of potential object holders.
Nodes []netmap.NodeInfo
+
+ Container containerSDK.Container
}
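
With `Container` on `Task`, the pull and local-put handlers can call `containerCore.IsIndexedContainer` without refetching the container. A sketch of a call site in the shape the policer hunks above use (the function name is hypothetical):

package policer

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// pullChunk queues a pull task; the container travels with it so
// HandlePullTask can pass IsIndexedContainer straight to engine.Put.
func pullChunk(ctx context.Context, r *replicator.Replicator, addr oid.Address, holders []netmap.NodeInfo, cnr containerSDK.Container) {
	r.HandlePullTask(ctx, replicator.Task{
		Addr:      addr,
		Nodes:     holders,
		Container: cnr,
	})
}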
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index 76c220fab..f0591de71 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"go.uber.org/zap"
)
@@ -33,10 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug(logs.ServingRequest,
- zap.String("component", "SessionService"),
- zap.String("request", "Create"),
- )
+ s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create"))
respBody, err := s.exec.Create(ctx, req.GetBody())
if err != nil {
diff --git a/pkg/services/session/server.go b/pkg/services/session/server.go
index 9e44ae667..e8555a7c9 100644
--- a/pkg/services/session/server.go
+++ b/pkg/services/session/server.go
@@ -3,7 +3,7 @@ package session
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
// Server is an interface of the FrostFS API Session service server.
diff --git a/pkg/services/session/sign.go b/pkg/services/session/sign.go
index 690fff896..3664c1403 100644
--- a/pkg/services/session/sign.go
+++ b/pkg/services/session/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type signService struct {
diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go
index 21f55a7d1..ea0233f9a 100644
--- a/pkg/services/session/storage/persistent/executor.go
+++ b/pkg/services/session/storage/persistent/executor.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.etcd.io/bbolt"
diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go
index 124d36930..f80ecb591 100644
--- a/pkg/services/session/storage/persistent/executor_test.go
+++ b/pkg/services/session/storage/persistent/executor_test.go
@@ -8,8 +8,8 @@ import (
"path/filepath"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go
index 411734ea1..60db97f90 100644
--- a/pkg/services/session/storage/persistent/options.go
+++ b/pkg/services/session/storage/persistent/options.go
@@ -19,7 +19,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- l: &logger.Logger{Logger: zap.L()},
+ l: logger.NewLoggerWrapper(zap.L()),
timeout: 100 * time.Millisecond,
}
}
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index 71711e371..132d62445 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -1,6 +1,7 @@
package persistent
import (
+ "context"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
@@ -63,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
// enable encryption if it
// was configured so
if cfg.privateKey != nil {
- rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
+ rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
cfg.privateKey.D.FillBytes(rawKey)
c, err := aes.NewCipher(rawKey)
@@ -105,7 +106,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
return err
})
if err != nil {
- s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
+ s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage,
zap.Error(err),
zap.Stringer("ownerID", ownerID),
zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -130,7 +131,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
if epochFromToken(v) <= epoch {
err = c.Delete()
if err != nil {
- s.l.Error(logs.PersistentCouldNotDeleteSToken,
+ s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken,
zap.String("token_id", hex.EncodeToString(k)),
)
}
@@ -141,7 +142,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
})
})
if err != nil {
- s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
+ s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens,
zap.Uint64("epoch", epoch),
)
}
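
The key-derivation tweak above relies on `ecdsa.PrivateKey` embedding its curve, so `privateKey.Params()` reaches the same curve parameters as the old `privateKey.Curve.Params()`. A runnable check of the size arithmetic for P-256, using only standard-library behavior:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	// (BitLen+7)/8 rounds the 256-bit curve order up to 32 bytes, a valid
	// AES-256 key length; FillBytes left-pads D values shorter than that.
	rawKey := make([]byte, (key.Params().N.BitLen()+7)/8)
	key.D.FillBytes(rawKey)
	fmt.Println(len(rawKey)) // 32
}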
diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go
index cd498709c..423e579d7 100644
--- a/pkg/services/session/storage/temporary/executor.go
+++ b/pkg/services/session/storage/temporary/executor.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody)
s.mtx.Lock()
s.tokens[key{
tokenID: base58.Encode(uidBytes),
- ownerID: base58.Encode(id.WalletBytes()),
+ ownerID: id.EncodeToString(),
}] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration())
s.mtx.Unlock()
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index ee93dee71..c9da6b842 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -9,7 +9,9 @@ import (
)
type key struct {
+ // nolint:unused
tokenID string
+ // nolint:unused
ownerID string
}
@@ -39,7 +41,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken
s.mtx.RLock()
t := s.tokens[key{
tokenID: base58.Encode(tokenID),
- ownerID: base58.Encode(ownerID.WalletBytes()),
+ ownerID: ownerID.EncodeToString(),
}]
s.mtx.RUnlock()
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index 693b16e60..58757ff6d 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -22,7 +22,7 @@ import (
)
func (s *Service) newAPERequest(ctx context.Context, namespace string,
- cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) (aperequest.Request, error) {
schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
if err != nil {
@@ -36,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()),
nativeschema.PropertyKeyActorRole: schemaRole,
}
- reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey)
+ reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey)
if err != nil {
return aperequest.Request{}, err
}
@@ -53,15 +53,19 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
}
+ resProps := map[string]string{
+ nativeschema.ProperyKeyTreeID: treeID,
+ }
+
return aperequest.NewRequest(
schemaMethod,
- aperequest.NewResource(resourceName, make(map[string]string)),
+ aperequest.NewResource(resourceName, resProps),
reqProps,
), nil
}
func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
- container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) error {
namespace := ""
cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
@@ -69,27 +73,27 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
namespace = cntNamespace
}
- request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey)
+ request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey)
if err != nil {
return fmt.Errorf("failed to create ape request: %w", err)
}
- return s.apeChecker.CheckAPE(checkercore.CheckPrm{
- Request: request,
- Namespace: namespace,
- Container: cid,
- PublicKey: publicKey,
- BearerToken: bt,
- SoftAPECheck: false,
+ return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{
+ Request: request,
+ Namespace: namespace,
+ Container: cid,
+ ContainerOwner: container.Value.Owner(),
+ PublicKey: publicKey,
+ BearerToken: bt,
})
}
// fillWithUserClaimTags fills APE request properties with user claim tags, fetching them from the frostfsid contract by the actor's public key.
-func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
+func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey)
if err != nil {
return reqProps, err
}
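
The net effect of this hunk is that APE chains can now match on the tree ID via a resource property, as the new test below exercises. A condensed sketch of the request shape, using the constructors visible above (the helper name and the `aperequest` import path are assumptions):

package tree

import (
	aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)

// newTreeRequest: the tree ID rides on the resource, actor claims ride on
// the request properties. ProperyKeyTreeID keeps the schema package's own
// spelling of that constant.
func newTreeRequest(method, resourceName, treeID string, reqProps map[string]string) aperequest.Request {
	resProps := map[string]string{
		nativeschema.ProperyKeyTreeID: treeID,
	}
	return aperequest.NewRequest(method, aperequest.NewResource(resourceName, resProps), reqProps)
}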
diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go
new file mode 100644
index 000000000..7b209fd47
--- /dev/null
+++ b/pkg/services/tree/ape_test.go
@@ -0,0 +1,246 @@
+package tree
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy"
+
+ senderPrivateKey, _ = keys.NewPrivateKey()
+
+ senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
+
+ rootCnr = &core.Container{Value: containerSDK.Container{}}
+)
+
+type frostfsIDProviderMock struct {
+ subjects map[util.Uint160]*client.Subject
+ subjectsExtended map[util.Uint160]*client.SubjectExtended
+}
+
+func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
+ v, ok := f.subjects[key]
+ if !ok {
+ return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return v, nil
+}
+
+func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
+ v, ok := f.subjectsExtended[key]
+ if !ok {
+ return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return v, nil
+}
+
+var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil)
+
+func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock {
+ return &frostfsIDProviderMock{
+ subjects: map[util.Uint160]*client.Subject{
+ scriptHashFromSenderKey(t, senderKey): {
+ Namespace: "testnamespace",
+ Name: "test",
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExtended: map[util.Uint160]*client.SubjectExtended{
+ scriptHashFromSenderKey(t, senderKey): {
+ Namespace: "testnamespace",
+ Name: "test",
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 1,
+ Name: "test",
+ Namespace: "testnamespace",
+ KV: map[string]string{
+ "attr1": "value1",
+ "attr2": "value2",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
+ pk, err := keys.NewPublicKeyFromString(senderKey)
+ require.NoError(t, err)
+ return pk.GetScriptHash()
+}
+
+type stMock struct{}
+
+func (m *stMock) CurrentEpoch() uint64 {
+ return 8
+}
+
+func TestCheckAPE(t *testing.T) {
+ cid := cid.ID{}
+ _ = cid.DecodeString(containerID)
+
+ t.Run("treeID rule", func(t *testing.T) {
+ los := inmemory.NewInmemoryLocalStorage()
+ mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+ fid := newFrostfsIDProviderMock(t)
+ s := Service{
+ cfg: cfg{
+ frostfsidSubjectProvider: fid,
+ },
+ apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+ }
+
+ mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.QuotaLimitReached,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.ProperyKeyTreeID,
+ Value: versionTreeID,
+ },
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey())
+
+ var chErr *checkercore.ChainRouterError
+ require.ErrorAs(t, err, &chErr)
+ require.Equal(t, chain.QuotaLimitReached, chErr.Status())
+ })
+
+ t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
+ los := inmemory.NewInmemoryLocalStorage()
+ mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+ fid := newFrostfsIDProviderMock(t)
+ s := Service{
+ cfg: cfg{
+ frostfsidSubjectProvider: fid,
+ },
+ apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+ }
+
+ los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringNotEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.PropertyKeyObjectType,
+ Value: "TOMBSTONE",
+ },
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
+ require.NoError(t, err)
+ })
+
+ t.Run("delete rule won't affect tree add", func(t *testing.T) {
+ los := inmemory.NewInmemoryLocalStorage()
+ mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+ fid := newFrostfsIDProviderMock(t)
+ s := Service{
+ cfg: cfg{
+ frostfsidSubjectProvider: fid,
+ },
+ apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+ }
+
+ los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringNotEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.PropertyKeyObjectType,
+ Value: "TOMBSTONE",
+ },
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
+ require.NoError(t, err)
+ })
+}
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index 38501b852..a11700771 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -8,19 +8,18 @@ import (
"sync"
"time"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
- tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials/insecure"
)
type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
key *ecdsa.PrivateKey
+ ds *internalNet.DialerSource
}
type cacheItem struct {
@@ -36,7 +35,7 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init(pk *ecdsa.PrivateKey) {
+func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
if conn := value.cc; conn != nil {
_ = conn.Close()
@@ -44,11 +43,12 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey) {
})
c.LRU = *l
c.key = pk
+ c.ds = ds
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
c.Lock()
- ccInt, ok := c.LRU.Get(netmapAddr)
+ ccInt, ok := c.Get(netmapAddr)
c.Unlock()
if ok {
@@ -66,14 +66,19 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- cc, err := c.dialTreeService(ctx, netmapAddr)
+ var netAddr network.Address
+ if err := netAddr.FromString(netmapAddr); err != nil {
+ return nil, err
+ }
+
+ cc, err := dialTreeService(ctx, netAddr, c.key, c.ds)
lastTry := time.Now()
c.Lock()
if err != nil {
- c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+ c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
} else {
- c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+ c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
}
c.Unlock()
@@ -83,46 +88,3 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
-
-func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
- var netAddr network.Address
- if err := netAddr.FromString(netmapAddr); err != nil {
- return nil, err
- }
-
- opts := []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInteceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- metrics.NewStreamClientInterceptor(),
- tracing.NewStreamClientInterceptor(),
- ),
- }
-
- if !netAddr.IsTLSEnabled() {
- opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }
-
- req := &HealthcheckRequest{
- Body: &HealthcheckRequest_Body{},
- }
- if err := SignMessage(req, c.key); err != nil {
- return nil, err
- }
-
- cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
- if err != nil {
- return nil, err
- }
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- defer cancel()
- // perform some request to check connection
- if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
- _ = cc.Close()
- return nil, err
- }
- return cc, nil
-}
diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go
index 435257550..c641a21a2 100644
--- a/pkg/services/tree/container.go
+++ b/pkg/services/tree/container.go
@@ -2,6 +2,7 @@ package tree
import (
"bytes"
+ "context"
"crypto/sha256"
"fmt"
"sync"
@@ -32,13 +33,13 @@ type containerCacheItem struct {
const defaultContainerCacheSize = 10
// getContainerNodes returns the nodes of the container and the position of the local key in that list.
-func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
- nm, err := s.nmSource.GetNetMap(0)
+func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
+ nm, err := s.nmSource.GetNetMap(ctx, 0)
if err != nil {
return nil, -1, fmt.Errorf("can't get netmap: %w", err)
}
- cnr, err := s.cnrSource.Get(cid)
+ cnr, err := s.cnrSource.Get(ctx, cid)
if err != nil {
return nil, -1, fmt.Errorf("can't get container: %w", err)
}
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index 95bdda34b..e7a13827e 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -131,7 +131,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) {
t.Run("boltdb forest", func(t *testing.T) {
p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
require.NoError(t, p.Open(context.Background(), 0o644))
- require.NoError(t, p.Init())
+ require.NoError(t, p.Init(context.Background()))
testGetSubTreeOrderAsc(t, p)
})
}
diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go
index 0f0e4ee57..07503f8c3 100644
--- a/pkg/services/tree/metrics.go
+++ b/pkg/services/tree/metrics.go
@@ -6,6 +6,7 @@ type MetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
+ AddOperation(string, string)
}
type defaultMetricsRegister struct{}
@@ -13,3 +14,4 @@ type defaultMetricsRegister struct{}
func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {}
+func (defaultMetricsRegister) AddOperation(string, string) {}
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 6a20fe5cc..56cbcc081 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -1,9 +1,12 @@
package tree
import (
+ "context"
"crypto/ecdsa"
+ "sync/atomic"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -17,12 +20,12 @@ import (
type ContainerSource interface {
container.Source
- DeletionInfo(cid.ID) (*container.DelInfo, error)
+ DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error)
// List must return the list of all containers in the FrostFS network
// at the moment of the call, along with any error that prevents fetching
// container information.
- List() ([]cid.ID, error)
+ List(ctx context.Context) ([]cid.ID, error)
}
type cfg struct {
@@ -33,19 +36,20 @@ type cfg struct {
nmSource netmap.Source
cnrSource ContainerSource
frostfsidSubjectProvider frostfsidcore.SubjectProvider
- eaclSource container.EACLSource
forest pilorama.Forest
// replication-related parameters
replicatorChannelCapacity int
replicatorWorkerCount int
replicatorTimeout time.Duration
containerCacheSize int
- authorizedKeys [][]byte
+ authorizedKeys atomic.Pointer[[][]byte]
+ syncBatchSize int
localOverrideStorage policyengine.LocalOverrideStorage
morphChainStorage policyengine.MorphRuleChainStorageReader
metrics MetricsRegister
+ ds *net.DialerSource
}
// Option represents configuration option for a tree service.
@@ -65,14 +69,6 @@ func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option
}
}
-// WithEACLSource sets a eACL table source for a tree service.
-// This option is required.
-func WithEACLSource(src container.EACLSource) Option {
- return func(c *cfg) {
- c.eaclSource = src
- }
-}
-
// WithNetmapSource sets a netmap source for a tree service.
// This option is required.
func WithNetmapSource(src netmap.Source) Option {
@@ -120,6 +116,12 @@ func WithReplicationWorkerCount(n int) Option {
}
}
+func WithSyncBatchSize(n int) Option {
+ return func(c *cfg) {
+ c.syncBatchSize = n
+ }
+}
+
func WithContainerCacheSize(n int) Option {
return func(c *cfg) {
if n > 0 {
@@ -146,10 +148,7 @@ func WithMetrics(v MetricsRegister) Option {
// keys that have rights to use Tree service.
func WithAuthorizedKeys(keys keys.PublicKeys) Option {
return func(c *cfg) {
- c.authorizedKeys = nil
- for _, key := range keys {
- c.authorizedKeys = append(c.authorizedKeys, key.Bytes())
- }
+ c.authorizedKeys.Store(fromPublicKeys(keys))
}
}
@@ -170,3 +169,9 @@ func WithNetmapState(state netmap.State) Option {
c.state = state
}
}
+
+func WithDialerSource(ds *net.DialerSource) Option {
+ return func(c *cfg) {
+ c.ds = ds
+ }
+}
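
Moving `authorizedKeys` to `atomic.Pointer[[][]byte]` lets the key list be replaced at runtime while request handlers read it without a mutex. A self-contained sketch of the load/store idiom:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var authorized atomic.Pointer[[][]byte]

	// Writer side (e.g. a config reload): swap in a whole new slice.
	keys := [][]byte{[]byte("key-a"), []byte("key-b")}
	authorized.Store(&keys)

	// Reader side (request path): one atomic load, no lock held.
	if p := authorized.Load(); p != nil {
		fmt.Println(len(*p), "authorized keys")
	}
}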
diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go
new file mode 100644
index 000000000..8f21686df
--- /dev/null
+++ b/pkg/services/tree/qos.go
@@ -0,0 +1,101 @@
+package tree
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+)
+
+var _ TreeServiceServer = (*ioTagAdjust)(nil)
+
+type AdjustIOTag interface {
+ AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
+type ioTagAdjust struct {
+ s TreeServiceServer
+ a AdjustIOTag
+}
+
+func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer {
+ return &ioTagAdjust{
+ s: s,
+ a: a,
+ }
+}
+
+func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Add(ctx, req)
+}
+
+func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.AddByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Apply(ctx, req)
+}
+
+func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.GetNodeByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
+ ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+ return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{
+ sender: srv,
+ ServerStream: srv,
+ ctxF: func() context.Context { return ctx },
+ })
+}
+
+func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
+ ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+ return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{
+ sender: srv,
+ ServerStream: srv,
+ ctxF: func() context.Context { return ctx },
+ })
+}
+
+func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Healthcheck(ctx, req)
+}
+
+func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Move(ctx, req)
+}
+
+func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Remove(ctx, req)
+}
+
+func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.TreeList(ctx, req)
+}
+
+type qosSend[T any] interface {
+ Send(T) error
+}
+
+type qosServerWrapper[T any] struct {
+ grpc.ServerStream
+ sender qosSend[T]
+ ctxF func() context.Context
+}
+
+func (w *qosServerWrapper[T]) Send(resp T) error {
+ return w.sender.Send(resp)
+}
+
+func (w *qosServerWrapper[T]) Context() context.Context {
+ return w.ctxF()
+}
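
For the two streaming methods, GetOpLog and GetSubTree, the wrapper above cannot simply pass a new context, because gRPC handlers read the request context from the stream itself. `qosServerWrapper` therefore embeds the original `grpc.ServerStream` and overrides only `Context()`. A stripped-down sketch of the same trick, with an inline tag value standing in for the frostfs-qos tagging API:

package qossketch

import (
	"context"

	"google.golang.org/grpc"
)

// ctxStream embeds the real stream and substitutes the context that
// the downstream handler observes; every other method (SendMsg,
// RecvMsg, headers, trailers) is promoted from the embedded
// grpc.ServerStream unchanged.
type ctxStream struct {
	grpc.ServerStream
	ctx context.Context
}

func (s *ctxStream) Context() context.Context { return s.ctx }

// tagKey is an illustrative context key; the real patch attaches an
// IO tag via the frostfs-qos tagging package instead.
type tagKey struct{}

// withTag derives a tagged context from the incoming stream and wraps
// the stream so the handler sees the derived context.
func withTag(srv grpc.ServerStream, tag string) grpc.ServerStream {
	ctx := context.WithValue(srv.Context(), tagKey{}, tag)
	return &ctxStream{ServerStream: srv, ctx: ctx}
}
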
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index ec41a60d4..647f8cb30 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -6,19 +6,32 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
+ "google.golang.org/grpc"
)
var errNoSuitableNode = errors.New("no node was found to execute the request")
+func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) {
+ var resp *Resp
+ var outErr error
+ err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ resp, outErr = callback(c, fCtx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
+}
+
// forEachNode executes the callback for each node in the container until true is returned.
// Returns errNoSuitableNode if there was no successful attempt to dial any node.
-func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error {
+func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error {
for _, n := range cntNodes {
if bytes.Equal(n.PublicKey(), s.rawPub) {
return nil
@@ -28,25 +41,15 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
var called bool
for _, n := range cntNodes {
var stop bool
- n.IterateNetworkEndpoints(func(endpoint string) bool {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
- trace.WithAttributes(
- attribute.String("endpoint", endpoint),
- ))
- defer span.End()
-
- c, err := s.cache.get(ctx, endpoint)
- if err != nil {
- return false
+ for endpoint := range n.NetworkEndpoints() {
+ stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool {
+ called = true
+ return f(fCtx, c)
+ })
+ if called {
+ break
}
-
- s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
-
- called = true
- stop = f(c)
- return true
- })
+ }
if stop {
return nil
}
@@ -56,3 +59,19 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
}
return nil
}
+
+func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
+ trace.WithAttributes(
+ attribute.String("endpoint", endpoint),
+ ))
+ defer span.End()
+
+ c, err := s.cache.get(ctx, endpoint)
+ if err != nil {
+ return false
+ }
+
+ s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
+ return f(ctx, c)
+}
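
`forEachNode` now walks endpoints with a plain for-range loop over `n.NetworkEndpoints()` instead of the old `IterateNetworkEndpoints` callback. This reads like ordinary iteration because the SDK method evidently returns a Go 1.23 function iterator (the CI matrix in this patch targets Go 1.23/1.24). A self-contained sketch, with illustrative types, of the shape such an iterator has and of how `break` maps onto it:

package main

import (
	"fmt"
	"iter"
)

type node struct{ endpoints []string }

// NetworkEndpoints yields endpoints one at a time; a break in the
// caller makes yield return false, which stops the walk early.
func (n node) NetworkEndpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, e := range n.endpoints {
			if !yield(e) {
				return
			}
		}
	}
}

func main() {
	n := node{endpoints: []string{"10.0.0.1:8080", "10.0.0.2:8080"}}
	for ep := range n.NetworkEndpoints() {
		fmt.Println(ep)
		break // stop after the first attempt, as forEachNode does once `called` is set
	}
}
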
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index 95c8f8013..ee40884eb 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -40,6 +39,7 @@ const (
defaultReplicatorCapacity = 64
defaultReplicatorWorkerCount = 64
defaultReplicatorSendTimeout = time.Second * 5
+ defaultSyncBatchSize = 1000
)
func (s *Service) localReplicationWorker(ctx context.Context) {
@@ -57,8 +57,8 @@ func (s *Service) localReplicationWorker(ctx context.Context) {
err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
if err != nil {
- s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
- zap.String("err", err.Error()))
+ s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
+ zap.Error(err))
}
span.End()
}
@@ -89,41 +89,23 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
var lastErr error
var lastAddr string
- n.IterateNetworkEndpoints(func(addr string) bool {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
- trace.WithAttributes(
- attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
- attribute.String("address", addr),
- ),
- )
- defer span.End()
-
+ for addr := range n.NetworkEndpoints() {
lastAddr = addr
-
- c, err := s.cache.get(ctx, addr)
- if err != nil {
- lastErr = fmt.Errorf("can't create client: %w", err)
- return false
+ lastErr = s.apply(ctx, n, addr, req)
+ if lastErr == nil {
+ break
}
-
- ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
- _, lastErr = c.Apply(ctx, req)
- cancel()
-
- return lastErr == nil
- })
+ }
if lastErr != nil {
if errors.Is(lastErr, errRecentlyFailed) {
- s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
- zap.String("last_error", lastErr.Error()),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()))
} else {
- s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
+ s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode,
zap.String("last_error", lastErr.Error()),
zap.String("address", lastAddr),
- zap.String("key", hex.EncodeToString(n.PublicKey())),
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ zap.String("key", hex.EncodeToString(n.PublicKey())))
}
s.metrics.AddReplicateTaskDuration(time.Since(start), false)
return lastErr
@@ -132,6 +114,26 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
return nil
}
+func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
+ attribute.String("address", addr),
+ ),
+ )
+ defer span.End()
+
+ c, err := s.cache.get(ctx, addr)
+ if err != nil {
+ return fmt.Errorf("can't create client: %w", err)
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
+ _, err = c.Apply(ctx, req)
+ cancel()
+ return err
+}
+
func (s *Service) replicateLoop(ctx context.Context) {
for range s.replicatorWorkerCount {
go s.replicationWorker(ctx)
@@ -151,10 +153,10 @@ func (s *Service) replicateLoop(ctx context.Context) {
return
case op := <-s.replicateCh:
start := time.Now()
- err := s.replicate(op)
+ err := s.replicate(ctx, op)
if err != nil {
- s.log.Error(logs.TreeErrorDuringReplication,
- zap.String("err", err.Error()),
+ s.log.Error(ctx, logs.TreeErrorDuringReplication,
+ zap.Error(err),
zap.Stringer("cid", op.cid),
zap.String("treeID", op.treeID))
}
@@ -163,14 +165,14 @@ func (s *Service) replicateLoop(ctx context.Context) {
}
}
-func (s *Service) replicate(op movePair) error {
+func (s *Service) replicate(ctx context.Context, op movePair) error {
req := newApplyRequest(&op)
err := SignMessage(req, s.key)
if err != nil {
return fmt.Errorf("can't sign data: %w", err)
}
- nodes, localIndex, err := s.getContainerNodes(op.cid)
+ nodes, localIndex, err := s.getContainerNodes(ctx, op.cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -204,7 +206,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
TreeId: op.treeID,
Operation: &LogMove{
ParentId: op.op.Parent,
- Meta: op.op.Meta.Bytes(),
+ Meta: op.op.Bytes(),
ChildId: op.op.Child,
},
},
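
Extracting the endpoint attempt into `s.apply` keeps the tracing span, the client lookup, and the per-attempt deadline scoped to a single address: `cancel()` runs before the next endpoint is tried, so timers do not pile up across the retry loop. A minimal sketch of that shape, with hypothetical helper names:

package relaysketch

import (
	"context"
	"time"
)

// tryOnce mirrors the shape of s.apply: one attempt, one child
// context with its own deadline, timer released when the attempt
// returns rather than when the whole retry loop finishes.
func tryOnce(ctx context.Context, timeout time.Duration, call func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return call(ctx)
}

// retryEndpoints mirrors the loop in ReplicateTreeOp: walk candidate
// addresses, stop at the first success, report the last failure.
// (The real code also records lastAddr for the warning log.)
func retryEndpoints(ctx context.Context, addrs []string, timeout time.Duration, call func(ctx context.Context, addr string) error) error {
	var lastErr error
	for _, addr := range addrs {
		lastErr = tryOnce(ctx, timeout, func(c context.Context) error {
			return call(c, addr)
		})
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}
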
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 60bb1a6ad..3994d6973 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -5,16 +5,19 @@ import (
"context"
"errors"
"fmt"
- "sort"
+ "slices"
"sync"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
@@ -55,17 +58,19 @@ func New(opts ...Option) *Service {
s.replicatorChannelCapacity = defaultReplicatorCapacity
s.replicatorWorkerCount = defaultReplicatorWorkerCount
s.replicatorTimeout = defaultReplicatorSendTimeout
+ s.syncBatchSize = defaultSyncBatchSize
s.metrics = defaultMetricsRegister{}
+ s.authorizedKeys.Store(&[][]byte{})
for i := range opts {
opts[i](&s.cfg)
}
if s.log == nil {
- s.log = &logger.Logger{Logger: zap.NewNop()}
+ s.log = logger.NewLoggerWrapper(zap.NewNop())
}
- s.cache.init(s.key)
+ s.cache.init(s.key, s.ds)
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
@@ -82,6 +87,7 @@ func New(opts ...Option) *Service {
// Start starts the service.
func (s *Service) Start(ctx context.Context) {
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
go s.replicateLoop(ctx)
go s.syncLoop(ctx)
@@ -101,6 +107,7 @@ func (s *Service) Shutdown() {
}
func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
+ defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -112,26 +119,17 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *AddResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.Add(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).Add)
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
@@ -153,6 +151,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
}
func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
+ defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -164,26 +163,17 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *AddByPathResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.AddByPath(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).AddByPath)
}
meta := protoToMeta(b.GetMeta())
@@ -217,6 +207,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
+ defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -228,26 +219,17 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *RemoveResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.Remove(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).Remove)
}
if b.GetNodeId() == pilorama.RootID {
@@ -270,6 +252,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
// Move applies client operation to the specified tree and pushes in queue
// for replication on other nodes.
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
+ defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -281,26 +264,17 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *MoveResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.Move(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).Move)
}
if b.GetNodeId() == pilorama.RootID {
@@ -322,6 +296,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
}
func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
+ defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -333,26 +308,17 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *GetNodeByPathResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.GetNodeByPath(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).GetNodeByPath)
}
attr := b.GetPathAttribute()
@@ -381,14 +347,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
} else {
var metaValue []KeyValue
for _, kv := range m.Items {
- for _, attr := range b.GetAttributes() {
- if kv.Key == attr {
- metaValue = append(metaValue, KeyValue{
- Key: kv.Key,
- Value: kv.Value,
- })
- break
- }
+ if slices.Contains(b.GetAttributes(), kv.Key) {
+ metaValue = append(metaValue, KeyValue{
+ Key: kv.Key,
+ Value: kv.Value,
+ })
}
}
x.Meta = metaValue
@@ -404,6 +367,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
+ defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -415,20 +379,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return err
}
- err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(srv.Context(), cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetSubTreeClient
var outErr error
- err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
- cli, outErr = c.GetSubTree(srv.Context(), req)
+ err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ cli, outErr = c.GetSubTree(fCtx, req)
return true
})
if err != nil {
@@ -450,7 +414,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
type stackItem struct {
values []pilorama.MultiNodeInfo
parent pilorama.MultiNode
- last *string
+ last *pilorama.Cursor
}
func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
@@ -474,10 +438,8 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
}
if ms == nil {
ms = m.Items
- } else {
- if len(m.Items) != 1 {
- return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
- }
+ } else if len(m.Items) != 1 {
+ return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
}
ts = append(ts, m.Time)
ps = append(ps, p)
@@ -501,14 +463,13 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
break
}
- nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
+ var err error
+ item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
if err != nil {
return err
}
- item.values = nodes
- item.last = last
- if len(nodes) == 0 {
+ if len(item.values) == 0 {
stack = stack[:len(stack)-1]
continue
}
@@ -620,10 +581,9 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
if len(nodes) == 0 {
return nodes, nil
}
- less := func(i, j int) bool {
- return bytes.Compare(nodes[i].Meta.GetAttr(pilorama.AttributeFilename), nodes[j].Meta.GetAttr(pilorama.AttributeFilename)) < 0
- }
- sort.Slice(nodes, less)
+ slices.SortFunc(nodes, func(a, b pilorama.NodeInfo) int {
+ return bytes.Compare(a.Meta.GetAttr(pilorama.AttributeFilename), b.Meta.GetAttr(pilorama.AttributeFilename))
+ })
return nodes, nil
default:
return nil, fmt.Errorf("unsupported order direction: %s", d.String())
@@ -631,7 +591,8 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
}
// Apply locally applies operation from the remote node to the tree.
-func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx))
err := verifyMessage(req)
if err != nil {
return nil, err
@@ -644,7 +605,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
key := req.GetSignature().GetKey()
- _, pos, _, err := s.getContainerInfo(cid, key)
+ _, pos, _, err := s.getContainerInfo(ctx, cid, key)
if err != nil {
return nil, err
}
@@ -675,6 +636,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
}
func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
+ defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -686,15 +648,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
return err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(srv.Context(), cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetOpLogClient
var outErr error
- err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
- cli, outErr = c.GetOpLog(srv.Context(), req)
+ err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ cli, outErr = c.GetOpLog(fCtx, req)
return true
})
if err != nil {
@@ -725,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
Body: &GetOpLogResponse_Body{
Operation: &LogMove{
ParentId: lm.Parent,
- Meta: lm.Meta.Bytes(),
+ Meta: lm.Bytes(),
ChildId: lm.Child,
},
},
@@ -739,6 +701,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
}
func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
+ defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -758,21 +721,12 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *TreeListResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.TreeList(ctx, req)
- return outErr == nil
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).TreeList)
}
ids, err := s.forest.TreeList(ctx, cid)
@@ -809,8 +763,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue {
// getContainerInfo returns the list of container nodes, position in the container for the node
// with pub key and total number of nodes in all replicas.
-func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
- cntNodes, _, err := s.getContainerNodes(cid)
+func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
+ cntNodes, _, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, 0, 0, err
}
@@ -830,3 +784,15 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec
return new(HealthcheckResponse), nil
}
+
+func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) {
+ s.authorizedKeys.Store(fromPublicKeys(newKeys))
+}
+
+func fromPublicKeys(keys keys.PublicKeys) *[][]byte {
+ buff := make([][]byte, len(keys))
+ for i, k := range keys {
+ buff[i] = k.Bytes()
+ }
+ return &buff
+}
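
`relayUnary` collapses five nearly identical redirect blocks because a Go method expression turns an interface method into an ordinary function that takes the receiver as its first argument: `(TreeServiceClient).Add` has exactly the callback type `relayUnary` declares, and likewise for AddByPath, Move, Remove, and TreeList. A toy illustration of the mechanism outside gRPC:

package main

import "fmt"

type Greeter interface {
	Greet(name string) string
}

type english struct{}

func (english) Greet(name string) string { return "hello, " + name }

// callVia receives the method as a plain function; the receiver is
// passed explicitly as the first argument, exactly like relayUnary
// passes the TreeServiceClient it obtained from the connection cache.
func callVia(g Greeter, f func(Greeter, string) string) string {
	return f(g, "world")
}

func main() {
	fmt.Println(callVia(english{}, (Greeter).Greet)) // hello, world
}
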
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index 7b6abb1dd..88d002621 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -5,9 +5,9 @@ package tree
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -181,24 +181,51 @@ func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"parentId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
out.RawString(prefix)
- out.Uint64(x.ParentId)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"meta\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -210,9 +237,18 @@ func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"bearerToken\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
out.RawString(prefix)
- out.Base64Bytes(x.BearerToken)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
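
The regenerated marshalers make two systematic changes. First, the old `prefix[1:]` trick, which sliced the leading comma off the first field's prefix, gives way to an explicit `first` flag, so a field can be skipped or reordered later without corrupting comma placement. Second, 64-bit integers are now emitted as quoted decimal strings via `strconv.AppendUint`. A compact sketch of that emitter layout, with illustrative names:

package jsonsketch

import "strconv"

// writeObject sketches the regenerated emitter: an explicit `first`
// flag decides comma placement per field, and uint64 values are
// written as quoted decimal strings.
func writeObject(buf []byte, fields map[string]uint64, order []string) []byte {
	first := true
	buf = append(buf, '{')
	for _, name := range order {
		if !first {
			buf = append(buf, ',')
		} else {
			first = false
		}
		buf = append(buf, '"')
		buf = append(buf, name...)
		buf = append(buf, `":"`...)
		buf = strconv.AppendUint(buf, fields[name], 10)
		buf = append(buf, '"')
	}
	return append(buf, '}')
}
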
@@ -245,7 +281,13 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -257,7 +299,15 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.ParentId = f
}
case "meta":
@@ -277,7 +327,13 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.BearerToken = f
}
}
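
On the read side, the regenerated code lexes 64-bit fields through `JsonNumber` and `strconv.ParseUint` rather than a direct `in.Uint64()`, accepting the quoted form the marshaler now produces. The quoting matches the canonical protobuf JSON mapping, where (u)int64 travels as a string because consumers that funnel JSON numbers through IEEE-754 doubles lose precision above 2^53. A small demonstration with the standard library:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// body mirrors the convention: 64-bit IDs as quoted strings on the
// wire, converted to integers at the boundary.
type body struct {
	ParentID string `json:"parentId"`
}

func main() {
	// 2^53+1 is not representable as a float64, so a bare JSON number
	// would round in many JSON stacks; the string form survives intact.
	raw := []byte(`{"parentId":"9007199254740993"}`)

	var b body
	if err := json.Unmarshal(raw, &b); err != nil {
		panic(err)
	}
	v, err := strconv.ParseUint(b.ParentID, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 9007199254740993, no precision loss
}
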
@@ -413,14 +469,25 @@ func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -555,11 +622,19 @@ func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"nodeId\":"
- out.RawString(prefix[1:])
- out.Uint64(x.NodeId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -592,7 +667,15 @@ func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "nodeId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.NodeId = f
}
}
@@ -728,14 +811,25 @@ func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -976,24 +1070,49 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"pathAttribute\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"pathAttribute\":"
out.RawString(prefix)
out.String(x.PathAttribute)
}
{
- const prefix string = ",\"path\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"path\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Path {
@@ -1005,7 +1124,12 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"meta\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -1017,9 +1141,18 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"bearerToken\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
out.RawString(prefix)
- out.Base64Bytes(x.BearerToken)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -1052,7 +1185,13 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -1097,7 +1236,13 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.BearerToken = f
}
}
@@ -1233,14 +1378,25 @@ func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1397,23 +1553,38 @@ func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"nodes\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodes\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Nodes {
if i != 0 {
out.RawByte(',')
}
- out.Uint64(x.Nodes[i])
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Nodes[i], 10)
+ out.RawByte('"')
}
out.RawByte(']')
}
{
- const prefix string = ",\"parentId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
out.RawString(prefix)
- out.Uint64(x.ParentId)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -1449,7 +1620,15 @@ func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
list = append(list, f)
in.WantComma()
}
@@ -1459,7 +1638,15 @@ func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.ParentId = f
}
}
@@ -1595,14 +1782,25 @@ func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1797,26 +1995,57 @@ func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"nodeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
out.RawString(prefix)
- out.Uint64(x.NodeId)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"bearerToken\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
out.RawString(prefix)
- out.Base64Bytes(x.BearerToken)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -1849,7 +2078,13 @@ func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -1861,13 +2096,27 @@ func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "nodeId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.NodeId = f
}
case "bearerToken":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.BearerToken = f
}
}
@@ -2003,14 +2252,25 @@ func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2284,14 +2544,25 @@ func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2532,29 +2803,63 @@ func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"parentId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
out.RawString(prefix)
- out.Uint64(x.ParentId)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"nodeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
out.RawString(prefix)
- out.Uint64(x.NodeId)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"meta\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -2566,9 +2871,18 @@ func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"bearerToken\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
out.RawString(prefix)
- out.Base64Bytes(x.BearerToken)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -2601,7 +2915,13 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -2613,13 +2933,29 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.ParentId = f
}
case "nodeId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.NodeId = f
}
case "meta":
@@ -2639,7 +2975,13 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.BearerToken = f
}
}
@@ -2775,14 +3117,25 @@ func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3056,14 +3409,25 @@ func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3338,24 +3702,49 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"pathAttribute\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"pathAttribute\":"
out.RawString(prefix)
out.String(x.PathAttribute)
}
{
- const prefix string = ",\"path\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"path\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Path {
@@ -3367,7 +3756,12 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"attributes\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"attributes\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Attributes {
@@ -3379,19 +3773,38 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"latestOnly\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"latestOnly\":"
out.RawString(prefix)
out.Bool(x.LatestOnly)
}
{
- const prefix string = ",\"allAttributes\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"allAttributes\":"
out.RawString(prefix)
out.Bool(x.AllAttributes)
}
{
- const prefix string = ",\"bearerToken\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
out.RawString(prefix)
- out.Base64Bytes(x.BearerToken)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -3424,7 +3837,13 @@ func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -3480,7 +3899,13 @@ func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.BearerToken = f
}
}
@@ -3616,14 +4041,25 @@ func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3824,19 +4260,39 @@ func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"nodeId\":"
- out.RawString(prefix[1:])
- out.Uint64(x.NodeId)
- }
- {
- const prefix string = ",\"timestamp\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
out.RawString(prefix)
- out.Uint64(x.Timestamp)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"meta\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"timestamp\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -3848,9 +4304,16 @@ func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- const prefix string = ",\"parentId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
out.RawString(prefix)
- out.Uint64(x.ParentId)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -3883,13 +4346,29 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "nodeId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.NodeId = f
}
case "timestamp":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.Timestamp = f
}
case "meta":
@@ -3909,7 +4388,15 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.ParentId = f
}
}
@@ -4007,10 +4494,16 @@ func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"nodes\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodes\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Nodes {
if i != 0 {
@@ -4195,14 +4688,25 @@ func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4369,11 +4873,22 @@ func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"direction\":"
- out.RawString(prefix[1:])
- out.Int32(int32(x.Direction))
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"direction\":"
+ out.RawString(prefix)
+ v := int32(x.Direction)
+ if vv, ok := GetSubTreeRequest_Body_Order_Direction_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
}
out.RawByte('}')
}
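
Enums get the same protobuf-JSON treatment: the direction is written by its generated name when the value appears in the `_Direction_name` map and falls back to the raw number otherwise, so a peer running a newer schema still round-trips unrecognized values. A sketch of that lookup, with an illustrative name table:

package enumsketch

import "strconv"

// directionName stands in for the generated enum name table.
var directionName = map[int32]string{
	0: "Asc",
	1: "Desc",
}

// appendDirection writes the symbolic name when known and falls back
// to the numeric value for values this build does not know about.
func appendDirection(buf []byte, v int32) []byte {
	if name, ok := directionName[v]; ok {
		buf = append(buf, '"')
		buf = append(buf, name...)
		return append(buf, '"')
	}
	return strconv.AppendInt(buf, int64(v), 10)
}
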
@@ -4619,41 +5134,82 @@ func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"rootId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"rootId\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.RootId {
if i != 0 {
out.RawByte(',')
}
- out.Uint64(x.RootId[i])
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.RootId[i], 10)
+ out.RawByte('"')
}
out.RawByte(']')
}
{
- const prefix string = ",\"depth\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"depth\":"
out.RawString(prefix)
out.Uint32(x.Depth)
}
{
- const prefix string = ",\"bearerToken\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
out.RawString(prefix)
- out.Base64Bytes(x.BearerToken)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"orderBy\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"orderBy\":"
out.RawString(prefix)
x.OrderBy.MarshalEasyJSON(out)
}
@@ -4688,7 +5244,13 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -4703,7 +5265,15 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
list = append(list, f)
in.WantComma()
}
@@ -4713,13 +5283,27 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "depth":
{
var f uint32
- f = in.Uint32()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
x.Depth = f
}
case "bearerToken":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.BearerToken = f
}
case "orderBy":
@@ -4862,14 +5446,25 @@ func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5076,45 +5671,72 @@ func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"nodeId\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.NodeId {
if i != 0 {
out.RawByte(',')
}
- out.Uint64(x.NodeId[i])
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId[i], 10)
+ out.RawByte('"')
}
out.RawByte(']')
}
{
- const prefix string = ",\"parentId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.ParentId {
if i != 0 {
out.RawByte(',')
}
- out.Uint64(x.ParentId[i])
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId[i], 10)
+ out.RawByte('"')
}
out.RawByte(']')
}
{
- const prefix string = ",\"timestamp\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"timestamp\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Timestamp {
if i != 0 {
out.RawByte(',')
}
- out.Uint64(x.Timestamp[i])
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp[i], 10)
+ out.RawByte('"')
}
out.RawByte(']')
}
{
- const prefix string = ",\"meta\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -5159,7 +5781,15 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
list = append(list, f)
in.WantComma()
}
@@ -5172,7 +5802,15 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
list = append(list, f)
in.WantComma()
}
@@ -5185,7 +5823,15 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
list = append(list, f)
in.WantComma()
}
@@ -5339,14 +5985,25 @@ func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5481,11 +6138,21 @@ func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -5518,7 +6185,13 @@ func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
}
@@ -5654,14 +6327,25 @@ func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5796,10 +6480,16 @@ func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"ids\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ids\":"
+ out.RawString(prefix)
out.RawByte('[')
for i := range x.Ids {
if i != 0 {
@@ -5983,14 +6673,25 @@ func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6168,19 +6869,39 @@ func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"operation\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"operation\":"
out.RawString(prefix)
x.Operation.MarshalEasyJSON(out)
}
@@ -6215,7 +6936,13 @@ func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -6364,14 +7091,25 @@ func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6645,14 +7383,25 @@ func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6847,26 +7596,55 @@ func (x *GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"containerId\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.ContainerId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"treeId\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- const prefix string = ",\"height\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"height\":"
out.RawString(prefix)
- out.Uint64(x.Height)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"count\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"count\":"
out.RawString(prefix)
- out.Uint64(x.Count)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Count, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -6899,7 +7677,13 @@ func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.ContainerId = f
}
case "treeId":
@@ -6911,13 +7695,29 @@ func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "height":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.Height = f
}
case "count":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.Count = f
}
}
@@ -7053,14 +7853,25 @@ func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7198,10 +8009,16 @@ func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"operation\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"operation\":"
+ out.RawString(prefix)
x.Operation.MarshalEasyJSON(out)
}
out.RawByte('}')
@@ -7372,14 +8189,25 @@ func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7653,14 +8481,25 @@ func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7934,14 +8773,25 @@ func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"body\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
x.Body.MarshalEasyJSON(out)
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 58cab659f..8221a4546 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -8,19 +8,17 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "go.uber.org/zap"
)
type message interface {
@@ -30,16 +28,7 @@ type message interface {
SetSignature(*Signature)
}
-func basicACLErr(op acl.Op) error {
- return fmt.Errorf("access to operation %s is denied by basic ACL check", op)
-}
-
-func eACLErr(op eacl.Operation, err error) error {
- return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
-}
-
var (
- errBearerWrongOwner = errors.New("bearer token must be signed by the container owner")
errBearerWrongContainer = errors.New("bearer token is created for another container")
errBearerSignature = errors.New("invalid bearer token signature")
)
@@ -49,7 +38,7 @@ var (
// Operation must be one of:
// - 1. ObjectPut;
// - 2. ObjectGet.
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error {
+func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error {
err := verifyMessage(req)
if err != nil {
return err
@@ -60,16 +49,14 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return err
}
- cnr, err := s.cnrSource.Get(cid)
+ cnr, err := s.cnrSource.Get(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container %s: %w", cid, err)
}
- eaclOp := eACLOp(op)
-
- bt, err := parseBearer(rawBearer, cid, eaclOp)
+ bt, err := parseBearer(rawBearer, cid)
if err != nil {
- return err
+ return fmt.Errorf("access to operation %s is denied: %w", op, err)
}
role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt)
@@ -77,56 +64,22 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get request role: %w", err)
}
- basicACL := cnr.Value.BasicACL()
- // Basic ACL mask can be unset, if a container operations are performed
- // with strict APE checks only.
- //
- // FIXME(@aarifullin): tree service temporiraly performs APE checks on
- // object verbs, because tree verbs have not been introduced yet.
- if basicACL == 0x0 {
- return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
+ if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil {
+ return apeErr(err)
}
+ return nil
+}
- if !basicACL.IsOpAllowed(op, role) {
- return basicACLErr(op)
+func apeErr(err error) error {
+ var chRouterErr *checkercore.ChainRouterError
+ if !errors.As(err, &chRouterErr) {
+ errServerInternal := &apistatus.ServerInternal{}
+ apistatus.WriteInternalServerErr(errServerInternal, err)
+ return errServerInternal
}
-
- if !basicACL.Extendable() {
- return nil
- }
-
- var useBearer bool
- if len(rawBearer) != 0 {
- if !basicACL.AllowedBearerRules(op) {
- s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL,
- zap.String("cid", cid.EncodeToString()),
- zap.Stringer("op", op),
- )
- } else {
- useBearer = true
- }
- }
-
- var tb eacl.Table
- signer := req.GetSignature().GetKey()
- if useBearer && !bt.Impersonate() {
- if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) {
- return eACLErr(eaclOp, errBearerWrongOwner)
- }
- tb = bt.EACLTable()
- } else {
- tbCore, err := s.eaclSource.GetEACL(cid)
- if err != nil {
- return handleGetEACLError(err)
- }
- tb = *tbCore.Value
-
- if useBearer && bt.Impersonate() {
- signer = bt.SigningKeyBytes()
- }
- }
-
- return checkEACL(tb, signer, eACLRole(role), eaclOp)
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(err.Error())
+ return errAccessDenied
}
// Returns true iff the operation is read-only and request was signed
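
apeErr narrows what callers can observe: only a denial produced by the APE chain router itself becomes ObjectAccessDenied with a reason, while any other failure (container lookup, subject resolution, morph outage) surfaces as a generic internal server error, so infrastructure faults are never mistaken for policy decisions. The shape of the mapping, with stand-in types instead of the real checkercore/apistatus ones:

```go
package main

import (
	"errors"
	"fmt"
)

// chainRouterError stands in for checkercore.ChainRouterError.
type chainRouterError struct{ msg string }

func (e *chainRouterError) Error() string { return e.msg }

// mapAPEError mirrors apeErr: router denials become access-denied,
// everything else is reported as an internal error.
func mapAPEError(err error) string {
	var chErr *chainRouterError
	if !errors.As(err, &chErr) {
		return "ServerInternal: " + err.Error()
	}
	return "ObjectAccessDenied: " + err.Error()
}

func main() {
	fmt.Println(mapAPEError(&chainRouterError{msg: "no allow rule matched"}))
	fmt.Println(mapAPEError(errors.New("morph client unavailable")))
}
```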
@@ -142,40 +95,32 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
}
key := sign.GetKey()
- for i := range s.authorizedKeys {
- if bytes.Equal(s.authorizedKeys[i], key) {
+ for _, currentKey := range *s.authorizedKeys.Load() {
+ if bytes.Equal(currentKey, key) {
return true, nil
}
}
return false, nil
}
-func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*bearer.Token, error) {
+func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) {
if len(rawBearer) == 0 {
return nil, nil
}
bt := new(bearer.Token)
if err := bt.Unmarshal(rawBearer); err != nil {
- return nil, eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err))
+ return nil, fmt.Errorf("invalid bearer token: %w", err)
}
if !bt.AssertContainer(cid) {
- return nil, eACLErr(eaclOp, errBearerWrongContainer)
+ return nil, errBearerWrongContainer
}
if !bt.VerifySignature() {
- return nil, eACLErr(eaclOp, errBearerSignature)
+ return nil, errBearerSignature
}
return bt, nil
}
-func handleGetEACLError(err error) error {
- if client.IsErrEACLNotFound(err) {
- return nil
- }
-
- return fmt.Errorf("get eACL table: %w", err)
-}
-
func verifyMessage(m message) error {
binBody, err := m.ReadSignedData(nil)
if err != nil {
@@ -249,84 +194,3 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a
return role, pub, nil
}
-
-func eACLOp(op acl.Op) eacl.Operation {
- switch op {
- case acl.OpObjectGet:
- return eacl.OperationGet
- case acl.OpObjectPut:
- return eacl.OperationPut
- default:
- panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
- }
-}
-
-func eACLRole(role acl.Role) eacl.Role {
- switch role {
- case acl.RoleOwner:
- return eacl.RoleUser
- case acl.RoleOthers:
- return eacl.RoleOthers
- default:
- panic(fmt.Sprintf("unexpected tree service ACL role: %s", role))
- }
-}
-
-var (
- errDENY = errors.New("DENY eACL rule")
- errNoAllowRules = errors.New("not found allowing rules for the request")
-)
-
-// checkEACL searches for the eACL rules that could be applied to the request
-// (a tuple of a signer key, his FrostFS role and a request operation).
-// It does not filter the request by the filters of the eACL table since tree
-// requests do not contain any "object" information that could be filtered and,
-// therefore, filtering leads to unexpected results.
-// The code was copied with the minor updates from the SDK repo:
-// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28.
-func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error {
- for _, record := range tb.Records() {
- // check type of operation
- if record.Operation() != op {
- continue
- }
-
- // check target
- if !targetMatches(record, role, signer) {
- continue
- }
-
- switch a := record.Action(); a {
- case eacl.ActionAllow:
- return nil
- case eacl.ActionDeny:
- return eACLErr(op, errDENY)
- default:
- return eACLErr(op, fmt.Errorf("unexpected action: %s", a))
- }
- }
-
- return eACLErr(op, errNoAllowRules)
-}
-
-func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool {
- for _, target := range rec.Targets() {
- // check public key match
- if pubs := target.BinaryKeys(); len(pubs) != 0 {
- for _, key := range pubs {
- if bytes.Equal(key, signer) {
- return true
- }
- }
-
- continue
- }
-
- // check target group match
- if role == target.Role() {
- return true
- }
- }
-
- return false
-}
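
One change above is easy to miss: isAuthorized now ranges over `*s.authorizedKeys.Load()`, and the test setup below calls `s.cfg.authorizedKeys.Store(&[][]byte{})`, which implies the key list became an atomically swapped pointer (presumably atomic.Pointer[[][]byte]). Configuration reloads can then replace the whole list in one step without a lock on the hot read path. A minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"sync/atomic"
)

var authorizedKeys atomic.Pointer[[][]byte]

// isAuthorized checks a key against the current snapshot; readers never block.
func isAuthorized(key []byte) bool {
	for _, k := range *authorizedKeys.Load() {
		if bytes.Equal(k, key) {
			return true
		}
	}
	return false
}

func main() {
	authorizedKeys.Store(&[][]byte{[]byte("alice")})
	fmt.Println(isAuthorized([]byte("alice"))) // true

	// A config reload installs a fresh slice in one atomic step.
	authorizedKeys.Store(&[][]byte{})
	fmt.Println(isAuthorized([]byte("alice"))) // false
}
```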
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index 3c3ebfe89..8815c227f 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -4,32 +4,70 @@ import (
"context"
"crypto/ecdsa"
"crypto/sha256"
+ "encoding/hex"
"errors"
"testing"
- aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ aclV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
)
+const versionTreeID = "version"
+
type dummyNetmapSource struct {
netmap.Source
}
+type dummySubjectProvider struct {
+ subjects map[util.Uint160]client.SubjectExtended
+}
+
+func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
+ res := s.subjects[addr]
+ return &client.Subject{
+ PrimaryKey: res.PrimaryKey,
+ AdditionalKeys: res.AdditionalKeys,
+ Namespace: res.Namespace,
+ Name: res.Name,
+ KV: res.KV,
+ }, nil
+}
+
+func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
+ res := s.subjects[addr]
+ return &res, nil
+}
+
+type dummyEpochSource struct {
+ epoch uint64
+}
+
+func (s dummyEpochSource) CurrentEpoch() uint64 {
+ return s.epoch
+}
+
type dummyContainerSource map[string]*containercore.Container
-func (s dummyContainerSource) List() ([]cid.ID, error) {
+func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
res := make([]cid.ID, 0, len(s))
var cnr cid.ID
@@ -45,7 +83,7 @@ func (s dummyContainerSource) List() ([]cid.ID, error) {
return res, nil
}
-func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
+func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) {
cnt, ok := s[id.String()]
if !ok {
return nil, errors.New("container not found")
@@ -53,20 +91,10 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
return cnt, nil
}
-func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) {
+func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) {
return &containercore.DelInfo{}, nil
}
-type dummyEACLSource map[string]*containercore.EACL
-
-func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) {
- cntEACL, ok := s[id.String()]
- if !ok {
- return nil, errors.New("container not found")
- }
- return cntEACL, nil
-}
-
func testContainer(owner user.ID) container.Container {
var r netmapSDK.ReplicaDescriptor
r.SetNumberOfObjects(1)
@@ -81,6 +109,8 @@ func testContainer(owner user.ID) container.Container {
return cnt
}
+const currentEpoch = 123
+
func TestMessageSign(t *testing.T) {
privs := make([]*keys.PrivateKey, 4)
for i := range privs {
@@ -99,6 +129,15 @@ func TestMessageSign(t *testing.T) {
Value: testContainer(ownerID),
}
+ e := inmemory.NewInMemoryLocalOverrides()
+ e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{
+ Type: engine.Container,
+ Name: cid1.EncodeToString(),
+ }, testChain(privs[0].PublicKey(), privs[1].PublicKey()))
+ frostfsidProvider := dummySubjectProvider{
+ subjects: make(map[util.Uint160]client.SubjectExtended),
+ }
+
s := &Service{
cfg: cfg{
log: test.NewLogger(t),
@@ -107,14 +146,13 @@ func TestMessageSign(t *testing.T) {
cnrSource: dummyContainerSource{
cid1.String(): cnr,
},
- eaclSource: dummyEACLSource{
- cid1.String(): &containercore.EACL{
- Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()),
- },
- },
+ frostfsidSubjectProvider: frostfsidProvider,
+ state: dummyEpochSource{epoch: currentEpoch},
},
+ apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
+ s.cfg.authorizedKeys.Store(&[][]byte{})
rawCID1 := make([]byte, sha256.Size)
cid1.Encode(rawCID1)
@@ -133,26 +171,26 @@ func TestMessageSign(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRW)
t.Run("missing signature, no panic", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
t.Run("invalid CID", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
cnr.Value.SetBasicACL(acl.Private)
t.Run("extension disabled", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
t.Run("invalid key", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
})
t.Run("bearer", func(t *testing.T) {
@@ -165,7 +203,7 @@ func TestMessageSign(t *testing.T) {
t.Run("invalid bearer", func(t *testing.T) {
req.Body.BearerToken = []byte{0xFF}
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer CID", func(t *testing.T) {
@@ -174,7 +212,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer owner", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -182,7 +220,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer signature", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -194,20 +232,112 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bv2.StableMarshal(nil)
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ })
+
+ t.Run("omit override within bt", func(t *testing.T) {
+ t.Run("personated", func(t *testing.T) {
+ bt := testBearerTokenNoOverride()
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override")
+ })
+
+ t.Run("impersonated", func(t *testing.T) {
+ bt := testBearerTokenNoOverride()
+ bt.SetImpersonate(true)
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ })
+ })
+
+ t.Run("invalid override within bearer token", func(t *testing.T) {
+ t.Run("personated", func(t *testing.T) {
+ bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
+ })
+
+ t.Run("impersonated", func(t *testing.T) {
+ bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
+ bt.SetImpersonate(true)
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
+ })
})
t.Run("impersonate", func(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRWExtended)
var bt bearer.Token
+ bt.SetExp(10)
+ bt.SetImpersonate(true)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ })
+
+ t.Run("impersonate, but target user is still set", func(t *testing.T) {
+ var bt bearer.Token
+ bt.SetExp(10)
bt.SetImpersonate(true)
+ var reqSigner user.ID
+ user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey()))
+
+ bt.ForUser(reqSigner)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ })
+
+ t.Run("impersonate but invalid signer", func(t *testing.T) {
+ var bt bearer.Token
+ bt.SetExp(10)
+ bt.SetImpersonate(true)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
require.NoError(t, bt.Sign(privs[1].PrivateKey))
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -217,64 +347,95 @@ func TestMessageSign(t *testing.T) {
t.Run("put and get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("only get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[2].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("none", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[3].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
})
}
func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token {
var b bearer.Token
- b.SetEACLTable(*testTable(cid, forPutGet, forGet))
+ b.SetExp(currentEpoch + 1)
+ b.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid.EncodeToString(),
+ },
+ Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
+ })
return b
}
-func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table {
- tgtGet := eaclSDK.NewTarget()
- tgtGet.SetRole(eaclSDK.RoleUnknown)
- tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()})
+func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token {
+ var b bearer.Token
+ b.SetExp(currentEpoch + 1)
+ b.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ },
+ Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
+ })
- rGet := eaclSDK.NewRecord()
- rGet.SetAction(eaclSDK.ActionAllow)
- rGet.SetOperation(eaclSDK.OperationGet)
- rGet.SetTargets(*tgtGet)
+ return b
+}
- tgtPut := eaclSDK.NewTarget()
- tgtPut.SetRole(eaclSDK.RoleUnknown)
- tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()})
+func testBearerTokenNoOverride() bearer.Token {
+ var b bearer.Token
+ b.SetExp(currentEpoch + 1)
+ return b
+}
- rPut := eaclSDK.NewRecord()
- rPut.SetAction(eaclSDK.ActionAllow)
- rPut.SetOperation(eaclSDK.OperationPut)
- rPut.SetTargets(*tgtPut)
-
- tb := eaclSDK.NewTable()
- tb.AddRecord(rGet)
- tb.AddRecord(rPut)
-
- tgt := eaclSDK.NewTarget()
- tgt.SetRole(eaclSDK.RoleOthers)
-
- for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} {
- r := eaclSDK.NewRecord()
- r.SetAction(eaclSDK.ActionDeny)
- r.SetTargets(*tgt)
- r.SetOperation(op)
- tb.AddRecord(r)
+func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
+ ruleGet := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodGetObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forGet.Bytes()),
+ },
+ },
+ }
+ rulePut := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodPutObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ },
}
- tb.SetCID(cid)
-
- return tb
+ return &chain.Chain{
+ Rules: []chain.Rule{
+ ruleGet,
+ rulePut,
+ },
+ }
}
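
Each rule in testChain sets Any: true, meaning a rule matches when any single condition holds; the get rule therefore admits either public key, the put rule admits only forPutGet, and anything unmatched falls through to the default deny. A toy evaluator capturing that decision table (the real evaluation happens in policy-engine's chain router; this is not its API):

```go
package main

import "fmt"

// allowed is an illustrative reduction of the two rules, keyed by
// hex-encoded actor public key.
func allowed(actor, putGetKey, getKey, method string) bool {
	switch method {
	case "GetObject":
		return actor == putGetKey || actor == getKey // Any: either condition suffices
	case "PutObject":
		return actor == putGetKey
	default:
		return false // no matching allow rule: denied by default
	}
}

func main() {
	fmt.Println(allowed("key1", "key1", "key2", "PutObject")) // true
	fmt.Println(allowed("key2", "key1", "key2", "PutObject")) // false
	fmt.Println(allowed("key2", "key1", "key2", "GetObject")) // true
}
```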
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 5bbc93978..af355639f 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -2,7 +2,9 @@ package tree
import (
"context"
+ "crypto/ecdsa"
"crypto/sha256"
+ "crypto/tls"
"errors"
"fmt"
"io"
@@ -13,6 +15,8 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -20,12 +24,15 @@ import (
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -39,7 +46,7 @@ const defaultSyncWorkerCount = 20
// tree IDs from the other container nodes. Returns ErrNotInContainer if the node
// is not included in the container.
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
- nodes, pos, err := s.getContainerNodes(cid)
+ nodes, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -71,8 +78,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
var treesToSync []string
var outErr error
- err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool {
- resp, outErr = c.TreeList(ctx, req)
+ err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(fCtx, req)
if outErr != nil {
return false
}
@@ -92,7 +99,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
for _, tid := range treesToSync {
h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
+ s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
continue
@@ -100,7 +107,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
- s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
+ s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
}
@@ -112,7 +119,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
// SynchronizeTree tries to synchronize log starting from the last stored height.
func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error {
- nodes, pos, err := s.getContainerNodes(cid)
+ nodes, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -131,14 +138,9 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
}
// mergeOperationStreams performs merge sort for node operation streams to one stream.
-func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
+func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
defer close(merged)
- ms := make([]*pilorama.Move, len(streams))
- for i := range streams {
- ms[i] = <-streams[i]
- }
-
// Merging different node streams shuffles incoming operations like that:
//
// x - operation from the stream A
@@ -150,6 +152,15 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
// operation height from the stream B. This height is stored in minStreamedLastHeight.
var minStreamedLastHeight uint64 = math.MaxUint64
+ ms := make([]*pilorama.Move, len(streams))
+ for i := range streams {
+ select {
+ case ms[i] = <-streams[i]:
+ case <-ctx.Done():
+ return minStreamedLastHeight
+ }
+ }
+
for {
var minTimeMoveTime uint64 = math.MaxUint64
minTimeMoveIndex := -1
@@ -164,7 +175,11 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
break
}
- merged <- ms[minTimeMoveIndex]
+ select {
+ case merged <- ms[minTimeMoveIndex]:
+ case <-ctx.Done():
+ return minStreamedLastHeight
+ }
height := ms[minTimeMoveIndex].Time
if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
minStreamedLastHeight = min(minStreamedLastHeight, height)
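
Every blocking channel operation in the merger is now paired with ctx.Done(), so cancelling a sync cannot strand the goroutine on a stalled peer stream or a full merged channel. The same idea as reusable helpers (a sketch; the code above inlines the selects):

```go
package syncutil

import "context"

// sendCtx delivers v to ch unless ctx is cancelled first.
func sendCtx[T any](ctx context.Context, ch chan<- T, v T) error {
	select {
	case ch <- v:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// recvCtx mirrors sendCtx for the receive side.
func recvCtx[T any](ctx context.Context, ch <-chan T) (v T, ok bool) {
	select {
	case v = <-ch:
		return v, true
	case <-ctx.Done():
		return v, false
	}
}
```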
@@ -176,38 +191,30 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string,
operationStream <-chan *pilorama.Move,
-) uint64 {
- errGroup, _ := errgroup.WithContext(ctx)
- const workersCount = 1024
- errGroup.SetLimit(workersCount)
-
- // We run TreeApply concurrently for the operation batch. Let's consider two operations
- // in the batch m1 and m2 such that m1.Time < m2.Time. The engine may apply m2 and fail
- // on m1. That means the service must start sync from m1.Time in the next iteration and
- // this height is stored in unappliedOperationHeight.
- var unappliedOperationHeight uint64 = math.MaxUint64
- var heightMtx sync.Mutex
-
+) (uint64, error) {
var prev *pilorama.Move
+ var batch []*pilorama.Move
for m := range operationStream {
// skip already applied op
if prev != nil && prev.Time == m.Time {
continue
}
prev = m
+ batch = append(batch, m)
- errGroup.Go(func() error {
- if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
- heightMtx.Lock()
- unappliedOperationHeight = min(unappliedOperationHeight, m.Time)
- heightMtx.Unlock()
- return err
+ if len(batch) == s.syncBatchSize {
+ if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
+ return batch[0].Time, err
}
- return nil
- })
+ batch = batch[:0]
+ }
}
- _ = errGroup.Wait()
- return unappliedOperationHeight
+ if len(batch) > 0 {
+ if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
+ return batch[0].Time, err
+ }
+ }
+ return math.MaxUint64, nil
}
func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
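
applyOperationStream trades the old 1024-goroutine fan-out for sequential batches: operations arrive height-ordered from the merger, duplicate heights are dropped, and s.forest.TreeApplyBatch applies each batch as a unit. On failure it returns the first height of the failed batch, so the next sync iteration resumes exactly where application stopped instead of tracking a minimum across racing goroutines. The skeleton, with a stand-in move type and apply callback:

```go
package syncsketch

import "math"

// move stands in for pilorama.Move; only the logical-time field matters here.
type move struct{ Time uint64 }

func drainBatched(ops <-chan *move, batchSize int, apply func([]*move) error) (uint64, error) {
	var prev *move
	var batch []*move
	for m := range ops {
		if prev != nil && prev.Time == m.Time {
			continue // the merger may emit the same height twice; apply it once
		}
		prev = m
		batch = append(batch, m)
		if len(batch) == batchSize {
			if err := apply(batch); err != nil {
				return batch[0].Time, err // resume point for the next sync
			}
			batch = batch[:0]
		}
	}
	if len(batch) > 0 {
		if err := apply(batch); err != nil {
			return batch[0].Time, err
		}
	}
	return math.MaxUint64, nil // nothing left unapplied
}
```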
@@ -240,10 +247,14 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
Parent: lm.GetParentId(),
Child: lm.GetChildId(),
}
- if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
+ if err := m.FromBytes(lm.GetMeta()); err != nil {
return err
}
- opsCh <- m
+ select {
+ case opsCh <- m:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
}
if !errors.Is(err, io.EOF) {
return err
@@ -259,7 +270,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
treeID string, nodes []netmapSDK.NodeInfo,
) uint64 {
- s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
+ s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
errGroup, egCtx := errgroup.WithContext(ctx)
const workersCount = 1024
@@ -272,13 +283,14 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
merged := make(chan *pilorama.Move)
var minStreamedLastHeight uint64
errGroup.Go(func() error {
- minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged)
+ minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged)
return nil
})
var minUnappliedHeight uint64
errGroup.Go(func() error {
- minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged)
- return nil
+ var err error
+ minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged)
+ return err
})
var allNodesSynced atomic.Bool
@@ -287,27 +299,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
for i, n := range nodes {
errGroup.Go(func() error {
var nodeSynced bool
- n.IterateNetworkEndpoints(func(addr string) bool {
+ for addr := range n.NetworkEndpoints() {
var a network.Address
if err := a.FromString(addr); err != nil {
- s.log.Warn(logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- return false
+ s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ continue
}
- cc, err := s.createConnection(a)
+ cc, err := dialTreeService(ctx, a, s.key, s.ds)
if err != nil {
- s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- return false
+ s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ continue
}
- defer cc.Close()
err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
if err != nil {
- s.log.Warn(logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
+ s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
}
nodeSynced = err == nil
- return true
- })
+ _ = cc.Close()
+ break
+ }
close(nodeOperationStreams[i])
if !nodeSynced {
allNodesSynced.Store(false)
@@ -317,7 +329,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
}
if err := errGroup.Wait(); err != nil {
allNodesSynced.Store(false)
- s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
+ s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
}
newHeight := minStreamedLastHeight
@@ -332,17 +344,60 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return from
}
-func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
- return grpc.NewClient(a.URIAddr(),
+func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) {
+ cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer()))
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ defer cancel()
+
+ req := &HealthcheckRequest{
+ Body: &HealthcheckRequest_Body{},
+ }
+ if err := SignMessage(req, key); err != nil {
+ return nil, err
+ }
+
+ // Perform a lightweight signed request to verify the connection is usable.
+ if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
+ _ = cc.Close()
+ return nil, err
+ }
+ return cc, nil
+}
+
+func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ host, isTLS, err := client.ParseURI(a.URIAddr())
+ if err != nil {
+ return nil, err
+ }
+
+ creds := insecure.NewCredentials()
+ if isTLS {
+ creds = credentials.NewTLS(&tls.Config{})
+ }
+
+ defaultOpts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
+ qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
- tracing_grpc.NewUnaryClientInteceptor(),
+ tracing_grpc.NewUnaryClientInterceptor(),
+ tagging.NewUnaryClientInterceptor(),
),
grpc.WithChainStreamInterceptor(
+ qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
metrics.NewStreamClientInterceptor(),
tracing_grpc.NewStreamClientInterceptor(),
+ tagging.NewStreamClientInterceptor(),
),
- grpc.WithTransportCredentials(insecure.NewCredentials()))
+ grpc.WithTransportCredentials(creds),
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
+ grpc.WithDisableServiceConfig(),
+ }
+
+ return grpc.NewClient(host, append(defaultOpts, opts...)...)
}
// ErrAlreadySyncing is returned when a service synchronization has already
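
Two details in the new dial path are worth noting: grpc.NewClient does not connect eagerly, so dialTreeService issues a signed Healthcheck under defaultClientConnectTimeout to prove the peer is reachable and actually serves the tree API before the connection is handed to the sync loop; and the transport credentials are now chosen from the URI scheme, enabling TLS for grpcs addresses. A reduced sketch of the credential selection (the real code parses the URI with the SDK's client.ParseURI; the prefix trimming here is an illustrative stand-in):

```go
package main

import (
	"crypto/tls"
	"strings"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

func dial(uri string) (*grpc.ClientConn, error) {
	creds := insecure.NewCredentials()
	host := strings.TrimPrefix(uri, "grpc://")
	if h, ok := strings.CutPrefix(uri, "grpcs://"); ok {
		host, creds = h, credentials.NewTLS(&tls.Config{}) // system roots, default verification
	}
	return grpc.NewClient(host,
		grpc.WithTransportCredentials(creds),
		// Queue RPCs while the channel connects instead of failing fast.
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
		// Ignore any service config supplied by the resolver.
		grpc.WithDisableServiceConfig(),
	)
}
```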
@@ -382,25 +437,25 @@ func (s *Service) syncLoop(ctx context.Context) {
return
case <-s.syncChan:
ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
- s.log.Debug(logs.TreeSyncingTrees)
+ s.log.Info(ctx, logs.TreeSyncingTrees)
start := time.Now()
- cnrs, err := s.cfg.cnrSource.List()
+ cnrs, err := s.cnrSource.List(ctx)
if err != nil {
- s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
+ s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
s.metrics.AddSyncDuration(time.Since(start), false)
span.End()
break
}
- newMap, cnrsToSync := s.containersToSync(cnrs)
+ newMap, cnrsToSync := s.containersToSync(ctx, cnrs)
s.syncContainers(ctx, cnrsToSync)
s.removeContainers(ctx, newMap)
- s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
+ s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)
s.metrics.AddSyncDuration(time.Since(start), true)
span.End()
@@ -420,19 +475,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
err := s.syncPool.Submit(func() {
defer wg.Done()
- s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
err := s.synchronizeAllTrees(ctx, cnr)
if err != nil {
- s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
+ s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
return
}
- s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
+ s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
})
if err != nil {
wg.Done()
- s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
+ s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
zap.Stringer("cid", cnr),
zap.Error(err))
if errors.Is(err, ants.ErrPoolClosed) {
@@ -456,9 +511,9 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
continue
}
- existed, err := containerCore.WasRemoved(s.cnrSource, cnr)
+ existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr)
if err != nil {
- s.log.Error(logs.TreeCouldNotCheckIfContainerExisted,
+ s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
zap.Stringer("cid", cnr),
zap.Error(err))
} else if existed {
@@ -470,25 +525,25 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
}
for _, cnr := range removed {
- s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
err := s.DropTree(ctx, cnr, "")
if err != nil {
- s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
+ s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
zap.Stringer("cid", cnr),
zap.Error(err))
}
}
}
-func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
+func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
cnrsToSync := make([]cid.ID, 0, len(cnrs))
for _, cnr := range cnrs {
- _, pos, err := s.getContainerNodes(cnr)
+ _, pos, err := s.getContainerNodes(ctx, cnr)
if err != nil {
- s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
+ s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes,
zap.Stringer("cid", cnr),
zap.Error(err))
continue
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 497d90554..87d419408 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -1,6 +1,7 @@
package tree
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -64,7 +65,7 @@ func Test_mergeOperationStreams(t *testing.T) {
merged := make(chan *pilorama.Move, 1)
min := make(chan uint64)
go func() {
- min <- mergeOperationStreams(nodeOpChans, merged)
+ min <- mergeOperationStreams(context.Background(), nodeOpChans, merged)
}()
var res []uint64
diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go
index 4399f8a8b..2827b10a9 100644
--- a/pkg/services/tree/types_frostfs.pb.go
+++ b/pkg/services/tree/types_frostfs.pb.go
@@ -5,12 +5,13 @@ package tree
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
)
type KeyValue struct {
@@ -113,16 +114,31 @@ func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"key\":"
- out.RawString(prefix[1:])
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
out.String(x.Key)
}
{
- const prefix string = ",\"value\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"value\":"
out.RawString(prefix)
- out.Base64Bytes(x.Value)
+ if x.Value != nil {
+ out.Base64Bytes(x.Value)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -161,7 +177,13 @@ func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "value":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Value = f
}
}
@@ -293,21 +315,45 @@ func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"parentID\":"
- out.RawString(prefix[1:])
- out.Uint64(x.ParentId)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentID\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
}
{
- const prefix string = ",\"meta\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
out.RawString(prefix)
- out.Base64Bytes(x.Meta)
+ if x.Meta != nil {
+ out.Base64Bytes(x.Meta)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"childID\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"childID\":"
out.RawString(prefix)
- out.Uint64(x.ChildId)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ChildId, 10)
+ out.RawByte('"')
}
out.RawByte('}')
}
@@ -340,19 +386,41 @@ func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentID":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.ParentId = f
}
case "meta":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Meta = f
}
case "childID":
{
var f uint64
- f = in.Uint64()
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
x.ChildId = f
}
}
@@ -464,16 +532,35 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
+ first := true
out.RawByte('{')
{
- const prefix string = ",\"key\":"
- out.RawString(prefix[1:])
- out.Base64Bytes(x.Key)
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
}
{
- const prefix string = ",\"signature\":"
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
out.RawString(prefix)
- out.Base64Bytes(x.Sign)
+ if x.Sign != nil {
+ out.Base64Bytes(x.Sign)
+ } else {
+ out.String("")
+ }
}
out.RawByte('}')
}
@@ -506,13 +593,25 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Key = f
}
case "signature":
{
var f []byte
- f = in.Bytes()
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
x.Sign = f
}
}
diff --git a/pkg/services/util/response/service.go b/pkg/services/util/response/service.go
index 005a643e5..5152a8ece 100644
--- a/pkg/services/util/response/service.go
+++ b/pkg/services/util/response/service.go
@@ -1,10 +1,10 @@
package response
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index bce43d6e8..348a45a94 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
diff --git a/internal/ape/converter.go b/pkg/util/ape/converter.go
similarity index 99%
rename from internal/ape/converter.go
rename to pkg/util/ape/converter.go
index eb80e7ded..c706cf052 100644
--- a/internal/ape/converter.go
+++ b/pkg/util/ape/converter.go
@@ -4,7 +4,7 @@ import (
"encoding/hex"
"fmt"
- v2acl "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ v2acl "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
diff --git a/internal/ape/converter_test.go b/pkg/util/ape/converter_test.go
similarity index 100%
rename from internal/ape/converter_test.go
rename to pkg/util/ape/converter_test.go
diff --git a/cmd/frostfs-cli/modules/util/ape.go b/pkg/util/ape/parser.go
similarity index 87%
rename from cmd/frostfs-cli/modules/util/ape.go
rename to pkg/util/ape/parser.go
index 73c368510..6f114d45b 100644
--- a/cmd/frostfs-cli/modules/util/ape.go
+++ b/pkg/util/ape/parser.go
@@ -1,16 +1,14 @@
-package util
+package ape
import (
"errors"
"fmt"
"os"
- "strconv"
"strings"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/flynn-archive/go-shlex"
- "github.com/spf13/cobra"
)
var (
@@ -27,38 +25,6 @@ var (
errFailedToParseAllAny = errors.New("any/all is not parsed")
)
-// PrintHumanReadableAPEChain print APE chain rules.
-func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) {
- cmd.Println("Chain ID: " + string(chain.ID))
- cmd.Printf(" HEX: %x\n", chain.ID)
- cmd.Println("Rules:")
- for _, rule := range chain.Rules {
- cmd.Println("\n\tStatus: " + rule.Status.String())
- cmd.Println("\tAny: " + strconv.FormatBool(rule.Any))
- cmd.Println("\tConditions:")
- for _, c := range rule.Condition {
- var ot string
- switch c.Kind {
- case apechain.KindResource:
- ot = "Resource"
- case apechain.KindRequest:
- ot = "Request"
- default:
- panic("unknown object type")
- }
- cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value))
- }
- cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted))
- for _, name := range rule.Actions.Names {
- cmd.Println("\t\t" + name)
- }
- cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted))
- for _, name := range rule.Resources.Names {
- cmd.Println("\t\t" + name)
- }
- }
-}
-
func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error {
data, err := os.ReadFile(path)
if err != nil {
@@ -208,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) {
case "deny":
if !found {
return apechain.AccessDenied, nil
- } else if strings.EqualFold(expression, "QuotaLimitReached") {
- return apechain.QuotaLimitReached, nil
- } else {
- return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
}
+ if strings.EqualFold(expression, "QuotaLimitReached") {
+ return apechain.QuotaLimitReached, nil
+ }
+ return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
case "allow":
if found {
return 0, errUnknownStatusDetail
@@ -295,7 +261,7 @@ func parseResource(lexeme string, isObj bool) (string, error) {
} else {
if lexeme == "*" {
return nativeschema.ResourceFormatAllContainers, nil
- } else if lexeme == "/*" {
+ } else if lexeme == "/*" || lexeme == "root/*" {
return nativeschema.ResourceFormatRootContainers, nil
} else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 {
lexeme = lexeme[1:]
diff --git a/cmd/frostfs-cli/modules/util/ape_test.go b/pkg/util/ape/parser_test.go
similarity index 96%
rename from cmd/frostfs-cli/modules/util/ape_test.go
rename to pkg/util/ape/parser_test.go
index b275803df..c236c4603 100644
--- a/cmd/frostfs-cli/modules/util/ape_test.go
+++ b/pkg/util/ape/parser_test.go
@@ -1,4 +1,4 @@
-package util
+package ape
import (
"fmt"
@@ -43,6 +43,15 @@ func TestParseAPERule(t *testing.T) {
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
},
},
+ {
+ name: "Valid rule for all containers in explicit root namespace",
+ rule: "allow Container.Put root/*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}},
+ },
+ },
{
name: "Valid rule for all objects in root namespace and container",
rule: "allow Object.Put /cid/*",
diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go
index 547c8d50b..66581878a 100644
--- a/pkg/util/attributes/parser_test.go
+++ b/pkg/util/attributes/parser_test.go
@@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) {
mExp = mSrc
}
- node.IterateAttributes(func(key, value string) {
+ for key, value := range node.Attributes() {
v, ok := mExp[key]
require.True(t, ok)
require.Equal(t, value, v)
delete(mExp, key)
- })
+ }
require.Empty(t, mExp)
}
diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go
index a9877e007..8569ec734 100644
--- a/pkg/util/http/calls.go
+++ b/pkg/util/http/calls.go
@@ -32,8 +32,8 @@ func (x *Server) Serve() error {
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to Serve method will have no effect.
-func (x *Server) Shutdown() error {
- ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout)
+func (x *Server) Shutdown(ctx context.Context) error {
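+ // Detach from the caller's cancellation but keep its values (trace ID and
+ // the like), so shutdown is bounded only by the configured timeout even if
+ // the parent context is already cancelled.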
+ ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout)
err := x.srv.Shutdown(ctx)
diff --git a/pkg/util/http/pprof.go b/pkg/util/http/pprof.go
index 7a0413000..f85fd2ea9 100644
--- a/pkg/util/http/pprof.go
+++ b/pkg/util/http/pprof.go
@@ -3,8 +3,14 @@ package httputil
import (
"net/http"
"net/http/pprof"
+
+ "github.com/felixge/fgprof"
)
+func init() {
+ http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
+}
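+
+// fgprof records wall-clock (on- and off-CPU) profiles, complementing the
+// CPU profile from net/http/pprof. A typical capture, assuming the profiling
+// server listens on :6060:
+//
+//	go tool pprof -http=:8080 'http://localhost:6060/debug/fgprof?seconds=10'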
+
// initializes pprof package in order to
// register Prometheus handlers on http.DefaultServeMux.
var _ = pprof.Handler("")
diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go
index 923412a7f..2589ab786 100644
--- a/pkg/util/http/server.go
+++ b/pkg/util/http/server.go
@@ -76,8 +76,7 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server {
o(c)
}
- switch {
- case c.shutdownTimeout <= 0:
+ if c.shutdownTimeout <= 0 {
panicOnOptValue("shutdown timeout", c.shutdownTimeout)
}
diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go
index b2942b52a..6337039a9 100644
--- a/pkg/util/keyer/dashboard.go
+++ b/pkg/util/keyer/dashboard.go
@@ -6,6 +6,7 @@ import (
"os"
"text/tabwriter"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -104,9 +105,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) {
func base58ToHex(data string) string {
val, err := base58.Decode(data)
- if err != nil {
- panic("produced incorrect base58 value")
- }
+ assert.NoError(err, "produced incorrect base58 value")
return hex.EncodeToString(val)
}
diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go
new file mode 100644
index 000000000..413b1d9aa
--- /dev/null
+++ b/pkg/util/logger/log.go
@@ -0,0 +1,35 @@
+package logger
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "go.uber.org/zap"
+)
+
+func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Debug(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Info(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Warn(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Error(msg, appendContext(ctx, fields...)...)
+}
+
+func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
+ if traceID := tracing.GetTraceID(ctx); traceID != "" {
+ fields = append(fields, zap.String("trace_id", traceID))
+ }
+ if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined {
+ fields = append(fields, zap.String("io_tag", ioTag))
+ }
+ return fields
+}
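+
+// A consumption sketch (assuming the frostfs-qos tagging package exposes a
+// ContextWithIOTag setter matching IOTagFromContext above): values carried
+// by ctx become structured fields automatically:
+//
+//	ctx := qos.ContextWithIOTag(context.Background(), "background")
+//	l.Info(ctx, "flush finished", zap.Int("objects", n))
+//	// -> fields: io_tag="background" (plus trace_id when ctx carries one)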
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index 4b60f02de..a1998cb1a 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -2,6 +2,7 @@ package logger
import (
"fmt"
+ "time"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/ssgreg/journald"
@@ -12,8 +13,10 @@ import (
// Logger represents a component
// for writing messages to log.
type Logger struct {
- *zap.Logger
- lvl zap.AtomicLevel
+ z *zap.Logger
+ c zapcore.Core
+ t Tag
+ w bool
}
// Prm groups Logger's parameters.
@@ -22,16 +25,8 @@ type Logger struct {
// Parameters that have been connected to the Logger support its
// configuration changing.
//
-// Passing Prm after a successful connection via the NewLogger, connects
-// the Prm to a new instance of the Logger.
-//
-// See also Reload, SetLevelString.
+// See also Logger.Reload, SetLevelString.
type Prm struct {
- // link to the created Logger
- // instance; used for a runtime
- // reconfiguration
- _log *Logger
-
// support runtime rereading
level zapcore.Level
@@ -43,6 +38,12 @@ type Prm struct {
// PrependTimestamp specifies whether to prepend a timestamp in the log
PrependTimestamp bool
+
+ // Options for zap.Logger
+ Options []zap.Option
+
+ // per-tag log levels; overrides level for the listed tags
+ tl map[Tag]zapcore.Level
}
const (
@@ -72,20 +73,10 @@ func (p *Prm) SetDestination(d string) error {
return nil
}
-// Reload reloads configuration of a connected instance of the Logger.
-// Returns ErrLoggerNotConnected if no connection has been performed.
-// Returns any reconfiguration error from the Logger directly.
-func (p Prm) Reload() error {
- if p._log == nil {
- // incorrect logger usage
- panic("parameters are not connected to any Logger")
- }
-
- return p._log.reload(p)
-}
-
-func defaultPrm() *Prm {
- return new(Prm)
+// SetTags parses the list of (tags, level) pairs and stores the result.
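+// Each item is {"tag[,tag...]", "level"}; for instance (a sketch):
+//
+//	_ = prm.SetTags([][]string{{"shard,gc", "debug"}, {"policer", "warn"}})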
+func (p *Prm) SetTags(tags [][]string) (err error) {
+ p.tl, err = parseTags(tags)
+ return err
}
// NewLogger constructs a new zap logger instance. Constructing with nil
@@ -99,10 +90,7 @@ func defaultPrm() *Prm {
// - ISO8601 time encoding.
//
// Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm *Prm) (*Logger, error) {
- if prm == nil {
- prm = defaultPrm()
- }
+func NewLogger(prm Prm) (*Logger, error) {
switch prm.dest {
case DestinationUndefined, DestinationStdout:
return newConsoleLogger(prm)
@@ -113,11 +101,9 @@ func NewLogger(prm *Prm) (*Logger, error) {
}
}
-func newConsoleLogger(prm *Prm) (*Logger, error) {
- lvl := zap.NewAtomicLevelAt(prm.level)
-
+func newConsoleLogger(prm Prm) (*Logger, error) {
c := zap.NewProductionConfig()
- c.Level = lvl
+ c.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
c.Encoding = "console"
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
@@ -129,25 +115,23 @@ func newConsoleLogger(prm *Prm) (*Logger, error) {
c.EncoderConfig.TimeKey = ""
}
- lZap, err := c.Build(
+ opts := []zap.Option{
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- )
+ zap.AddCallerSkip(1),
+ }
+ opts = append(opts, prm.Options...)
+ lZap, err := c.Build(opts...)
if err != nil {
return nil, err
}
-
- l := &Logger{Logger: lZap, lvl: lvl}
- prm._log = l
+ l := &Logger{z: lZap, c: lZap.Core()}
+ l = l.WithTag(TagMain)
return l, nil
}
-func newJournaldLogger(prm *Prm) (*Logger, error) {
- lvl := zap.NewAtomicLevelAt(prm.level)
-
+func newJournaldLogger(prm Prm) (*Logger, error) {
c := zap.NewProductionConfig()
- c.Level = lvl
- c.Encoding = "console"
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
@@ -160,22 +144,100 @@ func newJournaldLogger(prm *Prm) (*Logger, error) {
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
- core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
+ core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
- lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
-
- l := &Logger{Logger: lZap, lvl: lvl}
- prm._log = l
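+ // Sampling would normally be wired in by zap.Config.Build; with a custom
+ // journald core it has to be recreated manually around that core.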
+ var samplerOpts []zapcore.SamplerOption
+ if c.Sampling.Hook != nil {
+ samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook))
+ }
+ samplingCore := zapcore.NewSamplerWithOptions(
+ coreWithContext,
+ time.Second,
+ c.Sampling.Initial,
+ c.Sampling.Thereafter,
+ samplerOpts...,
+ )
+ opts := []zap.Option{
+ zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
+ zap.AddCallerSkip(1),
+ }
+ opts = append(opts, prm.Options...)
+ lZap := zap.New(samplingCore, opts...)
+ l := &Logger{z: lZap, c: lZap.Core()}
+ l = l.WithTag(TagMain)
return l, nil
}
-func (l *Logger) reload(prm Prm) error {
- l.lvl.SetLevel(prm.level)
- return nil
+// With creates a child logger with additional fields; the parent logger is
+// not affected. Panics if the tag is unset.
+func (l *Logger) With(fields ...zap.Field) *Logger {
+ if l.t == 0 {
+ panic("tag is unset")
+ }
+ c := *l
+ c.z = l.z.With(fields...)
+ // With called under the logger
+ c.w = true
+ return &c
+}
+
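+// core wraps a zapcore.Core so that the per-tag atomic level gates records;
+// everything else is delegated to the wrapped core.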
+type core struct {
+ c zapcore.Core
+ l zap.AtomicLevel
+}
+
+func (c *core) Enabled(lvl zapcore.Level) bool {
+ return c.l.Enabled(lvl)
+}
+
+func (c *core) With(fields []zapcore.Field) zapcore.Core {
+ clone := *c
+ clone.c = clone.c.With(fields)
+ return &clone
+}
+
+func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ // Gate here as well: zap's hot path consults Check rather than Enabled,
+ // so delegating unconditionally would bypass the per-tag level.
+ if !c.l.Enabled(e.Level) {
+ return ce
+ }
+ return c.c.Check(e, ce)
+}
+
+func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error {
+ return c.c.Write(e, fields)
+}
+
+func (c *core) Sync() error {
+ return c.c.Sync()
+}
+
+// WithTag is equivalent to calling [NewLogger] with the same parameters for
+// the current logger. Panics if an unsupported tag is provided.
+func (l *Logger) WithTag(tag Tag) *Logger {
+ if tag == 0 || tag > Tag(len(_Tag_index)-1) {
+ panic("unsupported tag " + tag.String())
+ }
+ if l.w {
+ panic("unsupported operation for the logger's state")
+ }
+ c := *l
+ c.t = tag
+ c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
+ return &core{
+ c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
+ l: tagToLogLevel[tag],
+ }
+ }))
+ return &c
+}
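+
+// A usage sketch (identifiers are illustrative): derive the tagged logger
+// first, then attach fields; the reverse order panics because With marks the
+// logger as already specialized:
+//
+//	shardLog := log.WithTag(TagShard).With(zap.String("shard_id", id))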
+
+func NewLoggerWrapper(z *zap.Logger) *Logger {
+ return &Logger{
+ z: z.WithOptions(zap.AddCallerSkip(1)),
+ t: TagMain,
+ c: z.Core(),
+ }
}
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
new file mode 100644
index 000000000..b867ee6cc
--- /dev/null
+++ b/pkg/util/logger/logger_test.go
@@ -0,0 +1,118 @@
+package logger
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest/observer"
+)
+
+func BenchmarkLogger(b *testing.B) {
+ ctx := context.Background()
+ m := map[string]Prm{}
+
+ prm := Prm{}
+ require.NoError(b, prm.SetLevelString("debug"))
+ m["logging enabled"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("error"))
+ m["logging disabled"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("error"))
+ require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
+ m["logging enabled via tags"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("debug"))
+ require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
+ m["logging disabled via tags"] = prm
+
+ for k, v := range m {
+ b.Run(k, func(b *testing.B) {
+ logger, err := createLogger(v)
+ require.NoError(b, err)
+ UpdateLevelForTags(v)
+ b.ResetTimer()
+ b.ReportAllocs()
+ for range b.N {
+ logger.Info(ctx, "test info")
+ }
+ })
+ }
+}
+
+type testCore struct {
+ core zapcore.Core
+}
+
+func (c *testCore) Enabled(lvl zapcore.Level) bool {
+ return c.core.Enabled(lvl)
+}
+
+func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
+ c.core = c.core.With(fields)
+ return c
+}
+
+func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ return ce.AddCore(e, c)
+}
+
+func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
+ return nil
+}
+
+func (c *testCore) Sync() error {
+ return c.core.Sync()
+}
+
+func createLogger(prm Prm) (*Logger, error) {
+ prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+ tc := testCore{core: core}
+ return &tc
+ })}
+ return NewLogger(prm)
+}
+
+func TestLoggerOutput(t *testing.T) {
+ obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
+
+ prm := Prm{}
+ require.NoError(t, prm.SetLevelString("debug"))
+ prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core {
+ return obs
+ })}
+ loggerMain, err := NewLogger(prm)
+ require.NoError(t, err)
+ UpdateLevelForTags(prm)
+
+ loggerMainWith := loggerMain.With(zap.String("key", "value"))
+
+ require.Panics(t, func() {
+ loggerMainWith.WithTag(TagShard)
+ })
+ loggerShard := loggerMain.WithTag(TagShard)
+ loggerShard = loggerShard.With(zap.String("key1", "value1"))
+
+ loggerMorph := loggerMain.WithTag(TagMorph)
+ loggerMorph = loggerMorph.With(zap.String("key2", "value2"))
+
+ ctx := context.Background()
+ loggerMain.Debug(ctx, "main")
+ loggerMainWith.Debug(ctx, "main with")
+ loggerShard.Debug(ctx, "shard")
+ loggerMorph.Debug(ctx, "morph")
+
+ require.Len(t, logs.All(), 4)
+ require.Len(t, logs.FilterFieldKey("key").All(), 1)
+ require.Len(t, logs.FilterFieldKey("key1").All(), 1)
+ require.Len(t, logs.FilterFieldKey("key2").All(), 1)
+ require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2)
+ require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1)
+ require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1)
+}
diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result
new file mode 100644
index 000000000..612fa2967
--- /dev/null
+++ b/pkg/util/logger/logger_test.result
@@ -0,0 +1,46 @@
+goos: linux
+goarch: amd64
+pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger
+cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
+BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op
+PASS
+ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s
diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go
new file mode 100644
index 000000000..1b98f2e62
--- /dev/null
+++ b/pkg/util/logger/tag_string.go
@@ -0,0 +1,43 @@
+// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT.
+
+package logger
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[TagMain-1]
+ _ = x[TagMorph-2]
+ _ = x[TagGrpcSvc-3]
+ _ = x[TagIr-4]
+ _ = x[TagProcessor-5]
+ _ = x[TagEngine-6]
+ _ = x[TagBlobovnicza-7]
+ _ = x[TagBlobovniczaTree-8]
+ _ = x[TagBlobstor-9]
+ _ = x[TagFSTree-10]
+ _ = x[TagGC-11]
+ _ = x[TagShard-12]
+ _ = x[TagWriteCache-13]
+ _ = x[TagDeleteSvc-14]
+ _ = x[TagGetSvc-15]
+ _ = x[TagSearchSvc-16]
+ _ = x[TagSessionSvc-17]
+ _ = x[TagTreeSvc-18]
+ _ = x[TagPolicer-19]
+ _ = x[TagReplicator-20]
+}
+
+const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator"
+
+var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148}
+
+func (i Tag) String() string {
+ i -= 1
+ if i >= Tag(len(_Tag_index)-1) {
+ return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _Tag_name[_Tag_index[i]:_Tag_index[i+1]]
+}
diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go
new file mode 100644
index 000000000..a5386707e
--- /dev/null
+++ b/pkg/util/logger/tags.go
@@ -0,0 +1,94 @@
+package logger
+
+import (
+ "fmt"
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+//go:generate stringer -type Tag -linecomment
+
+type Tag uint8
+
+const (
+ _ Tag = iota //
+ TagMain // main
+ TagMorph // morph
+ TagGrpcSvc // grpcsvc
+ TagIr // ir
+ TagProcessor // processor
+ TagEngine // engine
+ TagBlobovnicza // blobovnicza
+ TagBlobovniczaTree // blobovniczatree
+ TagBlobstor // blobstor
+ TagFSTree // fstree
+ TagGC // gc
+ TagShard // shard
+ TagWriteCache // writecache
+ TagDeleteSvc // deletesvc
+ TagGetSvc // getsvc
+ TagSearchSvc // searchsvc
+ TagSessionSvc // sessionsvc
+ TagTreeSvc // treesvc
+ TagPolicer // policer
+ TagReplicator // replicator
+
+ defaultLevel = zapcore.InfoLevel
+)
+
+var (
+ tagToLogLevel = map[Tag]zap.AtomicLevel{}
+ stringToTag = map[string]Tag{}
+)
+
+func init() {
+ for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
+ tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
+ stringToTag[i.String()] = i
+ }
+}
+
+// parseTags returns:
+//   - a map (always instantiated on success) from tag to the custom log level
+//     for that tag;
+//   - an error if parsing failed (the map is nil in that case).
+func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
+ m := make(map[Tag]zapcore.Level)
+ if len(raw) == 0 {
+ return m, nil
+ }
+ for _, item := range raw {
+ str, level := item[0], item[1]
+ if len(level) == 0 {
+ // Tags without an explicit level are skipped:
+ // the default log level will be used for them.
+ continue
+ }
+ var l zapcore.Level
+ err := l.UnmarshalText([]byte(level))
+ if err != nil {
+ return nil, err
+ }
+ tmp := strings.Split(str, ",")
+ for _, tagStr := range tmp {
+ tag, ok := stringToTag[strings.TrimSpace(tagStr)]
+ if !ok {
+ return nil, fmt.Errorf("unsupported tag %s", tagStr)
+ }
+ m[tag] = l
+ }
+ }
+ return m, nil
+}
+
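+// UpdateLevelForTags applies the levels from prm to the process-wide tag
+// registry: tags configured via SetTags get their custom level, all other
+// tags fall back to prm.level. A reload sketch:
+//
+//	prm := Prm{}
+//	_ = prm.SetLevelString("info")
+//	_ = prm.SetTags([][]string{{"morph", "debug"}})
+//	UpdateLevelForTags(prm)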
+func UpdateLevelForTags(prm Prm) {
+ for k, v := range tagToLogLevel {
+ nk, ok := prm.tl[k]
+ if ok {
+ v.SetLevel(nk)
+ } else {
+ v.SetLevel(prm.level)
+ }
+ }
+}
diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go
index f93756d17..b5b0a31eb 100644
--- a/pkg/util/logger/test/logger.go
+++ b/pkg/util/logger/test/logger.go
@@ -11,9 +11,10 @@ import (
// NewLogger creates a new logger.
func NewLogger(t testing.TB) *logger.Logger {
- var l logger.Logger
- l.Logger = zaptest.NewLogger(t,
- zaptest.Level(zapcore.DebugLevel),
- zaptest.WrapOptions(zap.Development(), zap.AddCaller()))
- return &l
+ return logger.NewLoggerWrapper(
+ zaptest.NewLogger(t,
+ zaptest.Level(zapcore.DebugLevel),
+ zaptest.WrapOptions(zap.Development(), zap.AddCaller()),
+ ),
+ )
}
diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go
index 97508f82a..a06296a07 100644
--- a/pkg/util/rand/rand.go
+++ b/pkg/util/rand/rand.go
@@ -13,7 +13,7 @@ func Uint64() uint64 {
return source.Uint64()
}
-// Uint64 returns a random uint32 value.
+// Uint32 returns a random uint32 value.
func Uint32() uint32 {
return source.Uint32()
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index e94ff77ad..bd15d0e8f 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -24,7 +24,7 @@ var (
errSocketIsNotInitialized = errors.New("socket is not initialized")
)
-// Initializes socket with provided name of
+// InitSocket initializes socket with provided name of
// environment variable.
func InitSocket() error {
notifySocket := os.Getenv("NOTIFY_SOCKET")
@@ -59,6 +59,8 @@ func FlagAndStatus(status string) error {
return fmt.Errorf("clock_gettime: %w", err)
}
status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10)
+ status += "\nSTATUS=RELOADING"
+ return Send(status)
}
status += "\nSTATUS=" + strings.TrimSuffix(status, "=1")
return Send(status)
diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go
new file mode 100644
index 000000000..7373e538f
--- /dev/null
+++ b/pkg/util/testing/netmap_source.go
@@ -0,0 +1,36 @@
+package testing
+
+import (
+ "context"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+var (
+ errInvalidDiff = errors.New("invalid diff")
+ errNetmapNotFound = errors.New("netmap not found")
+)
+
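+// TestNetmapSource is a simple in-memory netmap source for tests: it serves
+// the netmaps stored in Netmaps, keyed by epoch number.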
+type TestNetmapSource struct {
+ Netmaps map[uint64]*netmap.NetMap
+ CurrentEpoch uint64
+}
+
+func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+ if diff >= s.CurrentEpoch {
+ return nil, errInvalidDiff
+ }
+ return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
+}
+
+func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
+ if nm, found := s.Netmaps[epoch]; found {
+ return nm, nil
+ }
+ return nil, errNetmapNotFound
+}
+
+func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
+ return s.CurrentEpoch, nil
+}
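+
+// A test-setup sketch (epoch numbers are illustrative):
+//
+//	src := &TestNetmapSource{
+//		Netmaps:      map[uint64]*netmap.NetMap{5: nm},
+//		CurrentEpoch: 5,
+//	}
+//	got, err := src.GetNetMap(ctx, 0) // netmap of the current epoch (5)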
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
index d2004b673..39a420358 100644
--- a/scripts/populate-metabase/internal/generate.go
+++ b/scripts/populate-metabase/internal/generate.go
@@ -1,8 +1,10 @@
package internal
import (
+ cryptorand "crypto/rand"
"crypto/sha256"
"fmt"
+ "math/rand"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -14,14 +16,13 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
- "golang.org/x/exp/rand"
)
func GeneratePayloadPool(count uint, size uint) [][]byte {
- pool := [][]byte{}
- for i := uint(0); i < count; i++ {
+ var pool [][]byte
+ for range count {
payload := make([]byte, size)
- _, _ = rand.Read(payload)
+ _, _ = cryptorand.Read(payload)
pool = append(pool, payload)
}
@@ -29,9 +30,9 @@ func GeneratePayloadPool(count uint, size uint) [][]byte {
}
func GenerateAttributePool(count uint) []objectSDK.Attribute {
- pool := []objectSDK.Attribute{}
- for i := uint(0); i < count; i++ {
- for j := uint(0); j < count; j++ {
+ var pool []objectSDK.Attribute
+ for i := range count {
+ for j := range count {
attr := *objectSDK.NewAttribute()
attr.SetKey(fmt.Sprintf("key%d", i))
attr.SetValue(fmt.Sprintf("value%d", j))
@@ -42,8 +43,8 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute {
}
func GenerateOwnerPool(count uint) []user.ID {
- pool := []user.ID{}
- for i := uint(0); i < count; i++ {
+ var pool []user.ID
+ for range count {
pool = append(pool, usertest.ID())
}
return pool
@@ -117,8 +118,8 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption {
func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
return func(obj *objectSDK.Object) {
- attrs := []objectSDK.Attribute{}
- for i := uint(0); i < count; i++ {
+ var attrs []objectSDK.Attribute
+ for range count {
attrs = append(attrs, pool[rand.Intn(len(pool))])
}
obj.SetAttributes(attrs...)
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
index 390c1cdc0..fafe61eaa 100644
--- a/scripts/populate-metabase/internal/populate.go
+++ b/scripts/populate-metabase/internal/populate.go
@@ -29,15 +29,12 @@ func PopulateWithObjects(
) {
digits := "0123456789"
- for i := uint(0); i < count; i++ {
+ for range count {
obj := factory()
-
- id := []byte(fmt.Sprintf(
- "%c/%c/%c",
+ id := fmt.Appendf(nil, "%c/%c/%c",
digits[rand.Int()%len(digits)],
digits[rand.Int()%len(digits)],
- digits[rand.Int()%len(digits)],
- ))
+ digits[rand.Int()%len(digits)])
prm := meta.PutPrm{}
prm.SetObject(obj)
@@ -59,7 +56,7 @@ func PopulateWithBigObjects(
count uint,
factory func() *objectSDK.Object,
) {
- for i := uint(0); i < count; i++ {
+ for range count {
group.Go(func() error {
if err := populateWithBigObject(ctx, db, factory); err != nil {
return fmt.Errorf("couldn't put a big object: %w", err)
@@ -154,7 +151,7 @@ func PopulateGraveyard(
wg := &sync.WaitGroup{}
wg.Add(int(count))
- for i := uint(0); i < count; i++ {
+ for range count {
obj := factory()
prm := meta.PutPrm{}
@@ -226,7 +223,7 @@ func PopulateLocked(
wg := &sync.WaitGroup{}
wg.Add(int(count))
- for i := uint(0); i < count; i++ {
+ for range count {
defer wg.Done()
obj := factory()
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
index 2bc7a5553..8c4ea41ad 100644
--- a/scripts/populate-metabase/main.go
+++ b/scripts/populate-metabase/main.go
@@ -91,15 +91,15 @@ func populate() (err error) {
return fmt.Errorf("couldn't open the metabase: %w", err)
}
defer func() {
- if errOnClose := db.Close(); errOnClose != nil {
+ if errOnClose := db.Close(ctx); errOnClose != nil {
err = errors.Join(
err,
- fmt.Errorf("couldn't close the metabase: %w", db.Close()),
+ fmt.Errorf("couldn't close the metabase: %w", errOnClose),
)
}
}()
- if err = db.Init(); err != nil {
+ if err = db.Init(ctx); err != nil {
return fmt.Errorf("couldn't init the metabase: %w", err)
}
@@ -116,7 +116,7 @@ func populate() (err error) {
eg, ctx := errgroup.WithContext(ctx)
eg.SetLimit(int(jobs))
- for i := uint(0); i < numContainers; i++ {
+ for range numContainers {
cid := cidtest.ID()
for _, typ := range types {