diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
deleted file mode 100644
index 4234de160..000000000
--- a/.ci/Jenkinsfile
+++ /dev/null
@@ -1,81 +0,0 @@
-def golang = ['1.23', '1.24']
-def golangDefault = "golang:${golang.last()}"
-
-async {
-
- for (version in golang) {
- def go = version
-
- task("test/go${go}") {
- container("golang:${go}") {
- sh 'make test'
- }
- }
-
- task("build/go${go}") {
- container("golang:${go}") {
- for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
- sh """
- make bin/frostfs-${app}
- bin/frostfs-${app} --version
- """
- }
- }
- }
- }
-
- task('test/race') {
- container(golangDefault) {
- sh 'make test GOFLAGS="-count=1 -race"'
- }
- }
-
- task('lint') {
- container(golangDefault) {
- sh 'make lint-install lint'
- }
- }
-
- task('staticcheck') {
- container(golangDefault) {
- sh 'make staticcheck-install staticcheck-run'
- }
- }
-
- task('gopls') {
- container(golangDefault) {
- sh 'make gopls-install gopls-run'
- }
- }
-
- task('gofumpt') {
- container(golangDefault) {
- sh '''
- make fumpt-install
- make fumpt
- git diff --exit-code --quiet
- '''
- }
- }
-
- task('vulncheck') {
- container(golangDefault) {
- sh '''
- go install golang.org/x/vuln/cmd/govulncheck@latest
- govulncheck ./...
- '''
- }
- }
-
- task('pre-commit') {
- dockerfile("""
- FROM ${golangDefault}
- RUN apt update && \
- apt install -y --no-install-recommends pre-commit
- """) {
- withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
- sh 'pre-commit run --color=always --hook-stage=manual --all-files'
- }
- }
- }
-}
diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm
index 42aeebc48..5d67a1d04 100644
--- a/.docker/Dockerfile.adm
+++ b/.docker/Dockerfile.adm
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ci b/.docker/Dockerfile.ci
index 9ddd8de59..e9077c831 100644
--- a/.docker/Dockerfile.ci
+++ b/.docker/Dockerfile.ci
@@ -1,4 +1,4 @@
-FROM golang:1.23
+FROM golang:1.22
WORKDIR /tmp
diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli
index 16f130056..16f643b61 100644
--- a/.docker/Dockerfile.cli
+++ b/.docker/Dockerfile.cli
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir
index c119f8127..f2cb764e5 100644
--- a/.docker/Dockerfile.ir
+++ b/.docker/Dockerfile.ir
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage
index 854f7adea..cf7f97748 100644
--- a/.docker/Dockerfile.storage
+++ b/.docker/Dockerfile.storage
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml
index d568b9607..ce2d64dd9 100644
--- a/.forgejo/workflows/build.yml
+++ b/.forgejo/workflows/build.yml
@@ -1,10 +1,6 @@
name: Build
-on:
- pull_request:
- push:
- branches:
- - master
+on: [pull_request]
jobs:
build:
@@ -12,7 +8,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.23', '1.24' ]
+ go_versions: [ '1.22', '1.23' ]
steps:
- uses: actions/checkout@v3
diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml
index 190d7764a..7c5af8410 100644
--- a/.forgejo/workflows/dco.yml
+++ b/.forgejo/workflows/dco.yml
@@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.22'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml
deleted file mode 100644
index fe91d65f9..000000000
--- a/.forgejo/workflows/oci-image.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: OCI image
-
-on:
- push:
- workflow_dispatch:
-
-jobs:
- image:
- name: Build container images
- runs-on: docker
- container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
- steps:
- - name: Clone git repo
- uses: actions/checkout@v3
-
- - name: Build OCI image
- run: make images
-
- - name: Push image to OCI registry
- run: |
- echo "$REGISTRY_PASSWORD" \
- | docker login --username truecloudlab --password-stdin git.frostfs.info
- make push-images
- if: >-
- startsWith(github.ref, 'refs/tags/v') &&
- (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
- env:
- REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml
index c2e293175..8b06a2fdf 100644
--- a/.forgejo/workflows/pre-commit.yml
+++ b/.forgejo/workflows/pre-commit.yml
@@ -1,10 +1,5 @@
name: Pre-commit hooks
-
-on:
- pull_request:
- push:
- branches:
- - master
+on: [pull_request]
jobs:
precommit:
@@ -21,7 +16,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: 1.24
+ go-version: 1.23
- name: Set up Python
run: |
apt update
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index f3f5432ce..a908c6278 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -1,10 +1,5 @@
name: Tests and linters
-
-on:
- pull_request:
- push:
- branches:
- - master
+on: [pull_request]
jobs:
lint:
@@ -16,7 +11,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.23'
cache: true
- name: Install linters
@@ -30,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.23', '1.24' ]
+ go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -53,7 +48,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.22'
cache: true
- name: Run tests
@@ -68,7 +63,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.23'
cache: true
- name: Install staticcheck
@@ -104,13 +99,11 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.23'
cache: true
- name: Install gofumpt
run: make fumpt-install
- name: Run gofumpt
- run: |
- make fumpt
- git diff --exit-code --quiet
+ run: make fumpt
diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml
index bc94792d8..2951a8059 100644
--- a/.forgejo/workflows/vulncheck.yml
+++ b/.forgejo/workflows/vulncheck.yml
@@ -1,10 +1,5 @@
name: Vulncheck
-
-on:
- pull_request:
- push:
- branches:
- - master
+on: [pull_request]
jobs:
vulncheck:
@@ -18,8 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
- check-latest: true
+ go-version: '1.23'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/.forgejo/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/bug_report.md
rename to .github/ISSUE_TEMPLATE/bug_report.md
diff --git a/.forgejo/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/config.yml
rename to .github/ISSUE_TEMPLATE/config.yml
diff --git a/.forgejo/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/feature_request.md
rename to .github/ISSUE_TEMPLATE/feature_request.md
diff --git a/.forgejo/logo.svg b/.github/logo.svg
similarity index 100%
rename from .forgejo/logo.svg
rename to .github/logo.svg
diff --git a/.golangci.yml b/.golangci.yml
index e3ec09f60..971f0d0e7 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,107 +1,87 @@
-version: "2"
+# This file contains all available configuration options
+# with their default values.
+
+# options for analysis running
run:
+ # timeout for analysis, e.g. 30s, 5m, default is 1m
+ timeout: 20m
+
+ # include test files or not, default is true
tests: false
+
+# output configuration options
output:
+ # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
formats:
- tab:
- path: stdout
- colors: false
+ - format: tab
+
+# all available settings of specific linters
+linters-settings:
+ exhaustive:
+ # indicates that switch statements are to be considered exhaustive if a
+ # 'default' case is present, even if all enum members aren't listed in the
+ # switch
+ default-signifies-exhaustive: true
+ govet:
+ # report about shadowed variables
+ check-shadowing: false
+ staticcheck:
+ checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
+ funlen:
+ lines: 80 # default 60
+ statements: 60 # default 40
+ gocognit:
+ min-complexity: 40 # default 30
+ importas:
+ no-unaliased: true
+ no-extra-aliases: false
+ alias:
+ pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+ alias: objectSDK
+ custom:
+ truecloudlab-linters:
+ path: bin/linters/external_linters.so
+ original-url: git.frostfs.info/TrueCloudLab/linters.git
+ settings:
+ noliteral:
+          target-methods: ["reportFlushError", "reportError"]
+          disable-packages: ["codes", "err", "res", "exec"]
+ constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+
linters:
- default: none
enable:
- - bidichk
- - containedctx
- - contextcheck
- - copyloopvar
- - durationcheck
- - errcheck
- - exhaustive
- - funlen
- - gocognit
- - gocritic
- - godot
- - importas
- - ineffassign
- - intrange
- - misspell
- - perfsprint
- - predeclared
- - protogetter
- - reassign
+ # mandatory linters
+ - govet
- revive
+
+ # some default golangci-lint linters
+ - errcheck
+ - gosimple
+ - godot
+ - ineffassign
- staticcheck
- - testifylint
- - truecloudlab-linters
- - unconvert
- - unparam
+ - typecheck
- unused
- - usetesting
- - whitespace
- settings:
- exhaustive:
- default-signifies-exhaustive: true
- funlen:
- lines: 80
- statements: 60
- gocognit:
- min-complexity: 40
- gocritic:
- disabled-checks:
- - ifElseChain
- importas:
- alias:
- - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
- alias: objectSDK
- no-unaliased: true
- no-extra-aliases: false
- staticcheck:
- checks:
- - all
- - -QF1002
- unused:
- field-writes-are-uses: false
- exported-fields-are-used: false
- local-variables-are-used: false
- custom:
- truecloudlab-linters:
- path: bin/linters/external_linters.so
- original-url: git.frostfs.info/TrueCloudLab/linters.git
- settings:
- noliteral:
- constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
- disable-packages:
- - codes
- - err
- - res
- - exec
- target-methods:
- - reportFlushError
- - reportError
- exclusions:
- generated: lax
- presets:
- - comments
- - common-false-positives
- - legacy
- - std-error-handling
- paths:
- - third_party$
- - builtin$
- - examples$
-formatters:
- enable:
- - gci
+
+ # extra linters
+ - bidichk
+ - durationcheck
+ - exhaustive
+ - copyloopvar
- gofmt
- goimports
- settings:
- gci:
- sections:
- - standard
- - default
- custom-order: true
- exclusions:
- generated: lax
- paths:
- - third_party$
- - builtin$
- - examples$
+ - misspell
+ - predeclared
+ - reassign
+ - whitespace
+ - containedctx
+ - funlen
+ - gocognit
+ - contextcheck
+ - importas
+ - truecloudlab-linters
+ - perfsprint
+ - testifylint
+ - protogetter
+ disable-all: true
+ fast: false
diff --git a/.woodpecker/pre-commit.yml b/.woodpecker/pre-commit.yml
new file mode 100644
index 000000000..bdf3402de
--- /dev/null
+++ b/.woodpecker/pre-commit.yml
@@ -0,0 +1,11 @@
+pipeline:
+  # Kludge for non-root containers under Woodpecker
+ fix-ownership:
+ image: alpine:latest
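+    # (assumption) 1234:1234 matches the non-root UID:GID baked into the frostfs-ci image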
+ commands: chown -R 1234:1234 .
+
+ pre-commit:
+ image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36
+ commands:
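+      # (assumption) HOME may be unset for the container user; derive it from
+      # the passwd entry for the current UID so pre-commit can find its cache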
+ - export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)"
+ - pre-commit run --hook-stage manual
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 92c84ab16..e4ba6a5d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,30 +9,6 @@ Changelog for FrostFS Node
### Removed
### Updated
-## [v0.44.0] - 2024-11-25 - Rongbuk
-
-### Added
-- Allow to prioritize nodes during GET traversal via attributes (#1439)
-- Add metrics for the frostfsid cache (#1464)
-- Customize constant attributes attached to every tracing span (#1488)
-- Manage additional keys in the `frostfsid` contract (#1505)
-- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519)
-
-### Changed
-- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396)
-- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515)
-
-### Fixed
-- Fix EC object search (#1408)
-- Fix EC object put when one of the nodes is unavailable (#1427)
-
-### Removed
-- Drop most of the eACL-related code (#1425)
-- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483)
-
-### Upgrading from v0.43.0
-The metabase schema has changed completely, resync is required.
-
## [v0.42.0]
### Added
diff --git a/CODEOWNERS b/CODEOWNERS
deleted file mode 100644
index d19c96a5c..000000000
--- a/CODEOWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
-.forgejo/.* @potyarkin
-Makefile @potyarkin
diff --git a/Makefile b/Makefile
index 575eaae6f..2f29ac19c 100755
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,5 @@
#!/usr/bin/make -f
SHELL = bash
-.SHELLFLAGS = -euo pipefail -c
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -8,16 +7,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
-GO_VERSION ?= 1.23
-LINT_VERSION ?= 2.0.2
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
+GO_VERSION ?= 1.22
+LINT_VERSION ?= 1.60.3
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
PROTOC_VERSION ?= 25.0
-PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
+PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
PROTOC_OS_VERSION=linux-x86_64
endif
-STATICCHECK_VERSION ?= 2025.1.1
+STATICCHECK_VERSION ?= 2024.1.1
ARCH = amd64
BIN = bin
@@ -28,6 +27,12 @@ DIRS = $(BIN) $(RELEASE)
CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*)))
BINS = $(addprefix $(BIN)/, $(CMDS))
+# .deb package versioning
+OS_RELEASE = $(shell lsb_release -cs)
+PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
+ sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
+ sed "s/-/~/")-${OS_RELEASE}
+
OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
@@ -43,7 +48,7 @@ GOFUMPT_VERSION ?= v0.7.0
GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
-GOPLS_VERSION ?= v0.17.1
+GOPLS_VERSION ?= v0.15.1
GOPLS_DIR ?= $(abspath $(BIN))/gopls
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
GOPLS_TEMP_FILE := $(shell mktemp)
@@ -53,7 +58,7 @@ LOCODE_DB_PATH=$(abspath ./.cache/locode_db)
LOCODE_DB_VERSION=v0.4.0
.PHONY: help all images dep clean fmts fumpt imports test lint docker/lint
- prepare-release pre-commit unpre-commit
+ prepare-release debpackage pre-commit unpre-commit
# To build a specific binary, use it's name prefix with bin/ as a target
# For example `make bin/frostfs-node` will build only storage node binary
@@ -116,13 +121,13 @@ protoc:
# Install protoc
protoc-install:
@rm -rf $(PROTOBUF_DIR)
- @mkdir -p $(PROTOBUF_DIR)
+ @mkdir $(PROTOBUF_DIR)
@echo "⇒ Installing protoc... "
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
@echo "⇒ Instaling protogen FrostFS plugin..."
- @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
+ @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
# Build FrostFS component's docker image
image-%:
@@ -140,15 +145,6 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
-# Push FrostFS components' docker image to the registry
-push-image-%:
- @echo "⇒ Publish FrostFS $* docker image "
- @docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
-
# Run `make %` in Golang container
docker/%:
docker run --rm -t \
@@ -170,7 +166,7 @@ imports:
# Install gofumpt
fumpt-install:
@rm -rf $(GOFUMPT_DIR)
- @mkdir -p $(GOFUMPT_DIR)
+ @mkdir $(GOFUMPT_DIR)
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
# Run gofumpt
@@ -187,44 +183,21 @@ test:
@echo "⇒ Running go test"
@GOFLAGS="$(GOFLAGS)" go test ./...
-# Install Gerrit commit-msg hook
-review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
-review-install:
- @git config remote.review.url \
- || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
- @mkdir -p $(GIT_HOOK_DIR)/
- @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
- @chmod +x $(GIT_HOOK_DIR)/commit-msg
- @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
- @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
-
-# Create a PR in Gerrit
-review: BRANCH ?= master
-review:
- @git push review HEAD:refs/for/$(BRANCH) \
- --push-option r=e.stratonikov@yadro.com \
- --push-option r=d.stepanov@yadro.com \
- --push-option r=an.nikiforov@yadro.com \
- --push-option r=a.arifullin@yadro.com \
- --push-option r=ekaterina.lebedeva@yadro.com \
- --push-option r=a.savchuk@yadro.com \
- --push-option r=a.chuprov@yadro.com
-
# Run pre-commit
pre-commit-run:
@pre-commit run -a --hook-stage manual
# Install linters
-lint-install: $(BIN)
+lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
- @mkdir -p $(OUTPUT_LINT_DIR)
+ @mkdir $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
- @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION)
+ @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
@@ -236,7 +209,7 @@ lint:
# Install staticcheck
staticcheck-install:
@rm -rf $(STATICCHECK_DIR)
- @mkdir -p $(STATICCHECK_DIR)
+ @mkdir $(STATICCHECK_DIR)
@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
# Run staticcheck
@@ -249,7 +222,7 @@ staticcheck-run:
# Install gopls
gopls-install:
@rm -rf $(GOPLS_DIR)
- @mkdir -p $(GOPLS_DIR)
+ @mkdir $(GOPLS_DIR)
@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
# Run gopls
@@ -290,6 +263,19 @@ clean:
rm -rf $(BIN)
rm -rf $(RELEASE)
+# Package for Debian
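+# (assumption: dch comes from the devscripts package, dpkg-buildpackage from dpkg-dev)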
+debpackage:
+ dch -b --package frostfs-node \
+ --controlmaint \
+ --newversion $(PKG_VERSION) \
+ --distribution $(OS_RELEASE) \
+ "Please see CHANGELOG.md for code changes for $(VERSION)"
+ dpkg-buildpackage --no-sign -b
+
+# Cleanup deb package build directories
+debclean:
+ dh clean
+
# Download locode database
locode-download:
mkdir -p $(TMP_DIR)
@@ -303,12 +289,10 @@ env-up: all
echo "Frostfs contracts not found"; exit 1; \
fi
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
- ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
- --storage-wallet ./dev/storage/wallet01.json \
- --storage-wallet ./dev/storage/wallet02.json \
- --storage-wallet ./dev/storage/wallet03.json \
- --storage-wallet ./dev/storage/wallet04.json
-
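+	# refill GAS for each storage wallet with a separate call (one --storage-wallet per invocation)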
+ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
+ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
+ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
+ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
make locode-download; \
fi
@@ -317,6 +301,7 @@ env-up: all
# Shutdown dev environment
env-down:
- docker compose -f dev/docker-compose.yml down -v
+ docker compose -f dev/docker-compose.yml down
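+	# drop only the neo-go chain-state volume; plain "down" keeps the other
+	# named volumes (assumption about the dev compose volume layout)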
+ docker volume rm -f frostfs-node_neo-go
rm -rf ./$(TMP_DIR)/state
rm -rf ./$(TMP_DIR)/storage
diff --git a/README.md b/README.md
index 0109ed0e5..47d812b18 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
@@ -98,7 +98,7 @@ See `frostfs-contract`'s README.md for build instructions.
4. To create container and put object into it run (container and object IDs will be different):
```
-./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await
+./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --basic-acl public-read-write --await
 Enter password > <- press ENTER, there is no password for the wallet
CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
diff --git a/VERSION b/VERSION
index 9052dab96..01efe7f3a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v0.44.0
+v0.42.0
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index f194e97f5..81395edb0 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -16,18 +16,10 @@ const (
EndpointFlagDesc = "N3 RPC node endpoint"
EndpointFlagShort = "r"
- WalletPath = "wallet"
- WalletPathShorthand = "w"
- WalletPathUsage = "Path to the wallet"
-
AlphabetWalletsFlag = "alphabet-wallets"
AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
- AdminWalletPath = "wallet-admin"
- AdminWalletUsage = "Path to the admin wallet"
-
LocalDumpFlag = "local-dump"
- ProtoConfigPath = "protocol"
ContractsInitFlag = "contracts"
ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)"
ContractsURLFlag = "contracts-url"
diff --git a/cmd/frostfs-adm/internal/modules/config/config.go b/cmd/frostfs-adm/internal/modules/config/config.go
index 69153f0d7..a98245d01 100644
--- a/cmd/frostfs-adm/internal/modules/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/config/config.go
@@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")
var i innerring.GlagoliticLetter
- for i = range innerring.GlagoliticLetter(credSize) {
+ for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {
tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go
deleted file mode 100644
index d67b70d2a..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/root.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package maintenance
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
- "github.com/spf13/cobra"
-)
-
-var RootCmd = &cobra.Command{
- Use: "maintenance",
- Short: "Section for maintenance commands",
-}
-
-func init() {
- RootCmd.AddCommand(zombie.Cmd)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
deleted file mode 100644
index 1b66889aa..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package zombie
-
-import (
- "crypto/ecdsa"
- "fmt"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/cli/flags"
- "github.com/nspcc-dev/neo-go/cli/input"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
- keyDesc := viper.GetString(walletFlag)
- if keyDesc == "" {
- return &nodeconfig.Key(appCfg).PrivateKey
- }
- data, err := os.ReadFile(keyDesc)
- commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
-
- priv, err := keys.NewPrivateKeyFromBytes(data)
- if err != nil {
- w, err := wallet.NewWalletFromFile(keyDesc)
- commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
- return fromWallet(cmd, w, viper.GetString(addressFlag))
- }
- return &priv.PrivateKey
-}
-
-func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
- var (
- addr util.Uint160
- err error
- )
-
- if addrStr == "" {
- addr = w.GetChangeAddress()
- } else {
- addr, err = flags.ParseAddress(addrStr)
- commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
- }
-
- acc := w.GetAccount(addr)
- if acc == nil {
- commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
- }
-
- pass, err := getPassword()
- commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
-
- commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
-
- return &acc.PrivateKey().PrivateKey
-}
-
-func getPassword() (string, error) {
- // this check allows empty passwords
- if viper.IsSet("password") {
- return viper.GetString("password"), nil
- }
-
- return input.ReadPassword("Enter password > ")
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
deleted file mode 100644
index f73f33db9..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package zombie
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func list(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- var containerID *cid.ID
- if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
- containerID = &cid.ID{}
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
- }
-
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
- if containerID != nil && a.Container() != *containerID {
- return nil
- }
- cmd.Println(a.EncodeToString())
- return nil
- }))
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
deleted file mode 100644
index cd3a64499..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package zombie
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "github.com/spf13/cobra"
-)
-
-func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
- addresses := morphconfig.RPCEndpoint(appCfg)
- if len(addresses) == 0 {
- commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
- }
- key := nodeconfig.Key(appCfg)
- cli, err := client.New(cmd.Context(),
- key,
- client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
- client.WithEndpoints(addresses...),
- client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
- )
- commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
- return cli
-}
-
-func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
- hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
- commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
- cc, err := cntClient.NewFromMorph(morph, hs, 0)
- commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
- return cc
-}
-
-func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
- hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
- commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
- cli, err := netmapClient.NewFromMorph(morph, hs, 0)
- commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
- return cli
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
deleted file mode 100644
index 27f83aec7..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package zombie
-
-import (
- "context"
- "fmt"
- "math"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-type quarantine struct {
- // mtx protects current field.
- mtx sync.Mutex
- current int
- trees []*fstree.FSTree
-}
-
-func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
- var paths []string
- for _, sh := range engineInfo.Shards {
- var storagePaths []string
- for _, st := range sh.BlobStorInfo.SubStorages {
- storagePaths = append(storagePaths, st.Path)
- }
- if len(storagePaths) == 0 {
- continue
- }
- paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
- }
- q, err := newQuarantine(paths)
- commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
- return q
-}
-
-func commonPath(paths []string) string {
- if len(paths) == 0 {
- return ""
- }
- if len(paths) == 1 {
- return paths[0]
- }
- minLen := math.MaxInt
- for _, p := range paths {
- if len(p) < minLen {
- minLen = len(p)
- }
- }
-
- var sb strings.Builder
- for i := range minLen {
- for _, path := range paths[1:] {
- if paths[0][i] != path[i] {
- return sb.String()
- }
- }
- sb.WriteByte(paths[0][i])
- }
- return sb.String()
-}
-
-func newQuarantine(paths []string) (*quarantine, error) {
- var q quarantine
- for i := range paths {
- f := fstree.New(
- fstree.WithDepth(1),
- fstree.WithDirNameLen(1),
- fstree.WithPath(paths[i]),
- fstree.WithPerm(os.ModePerm),
- )
- if err := f.Open(mode.ComponentReadWrite); err != nil {
- return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
- }
- if err := f.Init(); err != nil {
- return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
- }
- q.trees = append(q.trees, f)
- }
- return &q, nil
-}
-
-func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
- for i := range q.trees {
- res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
- if err != nil {
- continue
- }
- return res.Object, nil
- }
- return nil, &apistatus.ObjectNotFound{}
-}
-
-func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
- for i := range q.trees {
- _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
- if err != nil {
- continue
- }
- return nil
- }
- return &apistatus.ObjectNotFound{}
-}
-
-func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
- data, err := obj.Marshal()
- if err != nil {
- return err
- }
-
- var prm common.PutPrm
- prm.Address = objectcore.AddressOf(obj)
- prm.Object = obj
- prm.RawData = data
-
- q.mtx.Lock()
- current := q.current
- q.current = (q.current + 1) % len(q.trees)
- q.mtx.Unlock()
-
- _, err = q.trees[current].Put(ctx, prm)
- return err
-}
-
-func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
- var prm common.IteratePrm
- prm.Handler = func(elem common.IterationElement) error {
- return f(elem.Address)
- }
- for i := range q.trees {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- _, err := q.trees[i].Iterate(ctx, prm)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
deleted file mode 100644
index 0b8f2f172..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package zombie
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func remove(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
-
- var containerID cid.ID
- cidStr, _ := cmd.Flags().GetString(cidFlag)
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
-
- var objectID *oid.ID
- oidStr, _ := cmd.Flags().GetString(oidFlag)
- if oidStr != "" {
- objectID = &oid.ID{}
- commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
- }
-
- if objectID != nil {
- var addr oid.Address
- addr.SetContainer(containerID)
- addr.SetObject(*objectID)
- removeObject(cmd, q, addr)
- } else {
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
- if addr.Container() != containerID {
- return nil
- }
- removeObject(cmd, q, addr)
- return nil
- }))
- }
-}
-
-func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
- err := q.Delete(cmd.Context(), addr)
- if errors.Is(err, new(apistatus.ObjectNotFound)) {
- return
- }
- commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
deleted file mode 100644
index f179c7c2d..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package zombie
-
-import (
- "crypto/sha256"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func restore(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- morphClient := createMorphClient(cmd, appCfg)
- cnrCli := createContainerClient(cmd, morphClient)
-
- var containerID cid.ID
- cidStr, _ := cmd.Flags().GetString(cidFlag)
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
-
- var objectID *oid.ID
- oidStr, _ := cmd.Flags().GetString(oidFlag)
- if oidStr != "" {
- objectID = &oid.ID{}
- commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
- }
-
- if objectID != nil {
- var addr oid.Address
- addr.SetContainer(containerID)
- addr.SetObject(*objectID)
- restoreObject(cmd, storageEngine, q, addr, cnrCli)
- } else {
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
- if addr.Container() != containerID {
- return nil
- }
- restoreObject(cmd, storageEngine, q, addr, cnrCli)
- return nil
- }))
- }
-}
-
-func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
- obj, err := q.Get(cmd.Context(), addr)
- commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
- rawCID := make([]byte, sha256.Size)
-
- cid := addr.Container()
- cid.Encode(rawCID)
- cnr, err := cnrCli.Get(cmd.Context(), rawCID)
- commonCmd.ExitOnErr(cmd, "get container: %w", err)
-
- putPrm := engine.PutPrm{
- Object: obj,
- IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
- }
- commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
- commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
deleted file mode 100644
index c8fd9e5e5..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package zombie
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- flagBatchSize = "batch-size"
- flagBatchSizeUsage = "Objects iteration batch size"
- cidFlag = "cid"
- cidFlagUsage = "Container ID"
- oidFlag = "oid"
- oidFlagUsage = "Object ID"
- walletFlag = "wallet"
- walletFlagShorthand = "w"
- walletFlagUsage = "Path to the wallet or binary key"
- addressFlag = "address"
- addressFlagUsage = "Address of wallet account"
- moveFlag = "move"
- moveFlagUsage = "Move objects from storage engine to quarantine"
-)
-
-var (
- Cmd = &cobra.Command{
- Use: "zombie",
- Short: "Zombie objects related commands",
- }
- scanCmd = &cobra.Command{
- Use: "scan",
- Short: "Scan storage engine for zombie objects and move them to quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
- _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
- _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
- _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
- },
- Run: scan,
- }
- listCmd = &cobra.Command{
- Use: "list",
- Short: "List zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- },
- Run: list,
- }
- restoreCmd = &cobra.Command{
- Use: "restore",
- Short: "Restore zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
- },
- Run: restore,
- }
- removeCmd = &cobra.Command{
- Use: "remove",
- Short: "Remove zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
- },
- Run: remove,
- }
-)
-
-func init() {
- initScanCmd()
- initListCmd()
- initRestoreCmd()
- initRemoveCmd()
-}
-
-func initScanCmd() {
- Cmd.AddCommand(scanCmd)
-
- scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
- scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
- scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
- scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
-}
-
-func initListCmd() {
- Cmd.AddCommand(listCmd)
-
- listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- listCmd.Flags().String(cidFlag, "", cidFlagUsage)
-}
-
-func initRestoreCmd() {
- Cmd.AddCommand(restoreCmd)
-
- restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
- restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
-
-func initRemoveCmd() {
- Cmd.AddCommand(removeCmd)
-
- removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
- removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
deleted file mode 100644
index 268ec4911..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package zombie
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/sha256"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
- clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
- "golang.org/x/sync/errgroup"
-)
-
-func scan(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
- if batchSize == 0 {
- commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
- }
- move, _ := cmd.Flags().GetBool(moveFlag)
-
- storageEngine := newEngine(cmd, appCfg)
- morphClient := createMorphClient(cmd, appCfg)
- cnrCli := createContainerClient(cmd, morphClient)
- nmCli := createNetmapClient(cmd, morphClient)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- pk := getPrivateKey(cmd, appCfg)
-
- epoch, err := nmCli.Epoch(cmd.Context())
- commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
-
- nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
- commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
-
- cmd.Printf("Epoch: %d\n", nm.Epoch())
- cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
-
- ps := &processStatus{
- statusCount: make(map[status]uint64),
- }
-
- stopCh := make(chan struct{})
- start := time.Now()
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- defer wg.Done()
- tick := time.NewTicker(time.Second)
- defer tick.Stop()
- for {
- select {
- case <-cmd.Context().Done():
- return
- case <-stopCh:
- return
- case <-tick.C:
- fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
- }
- }
- }()
- go func() {
- defer wg.Done()
- err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
- close(stopCh)
- }()
- wg.Wait()
- commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
-
- cmd.Println()
- cmd.Println("Status description:")
- cmd.Println("undefined -- nothing is clear")
- cmd.Println("found -- object is found in cluster")
- cmd.Println("quarantine -- object is not found in cluster")
- cmd.Println()
- for status, count := range ps.statusCount {
- cmd.Printf("Status: %s, Count: %d\n", status, count)
- }
-}
-
-type status string
-
-const (
- statusUndefined status = "undefined"
- statusFound status = "found"
- statusQuarantine status = "quarantine"
-)
-
-func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
- rawCID := make([]byte, sha256.Size)
- cid := obj.Address.Container()
- cid.Encode(rawCID)
-
- cnr, err := cnrCli.Get(ctx, rawCID)
- if err != nil {
- var errContainerNotFound *apistatus.ContainerNotFound
- if errors.As(err, &errContainerNotFound) {
- // Policer will deal with this object.
- return statusFound, nil
- }
- return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
- }
- nm, err := nmCli.NetMap(ctx)
- if err != nil {
- return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
- }
-
- nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
- if err != nil {
- // Not enough nodes, check all netmap nodes.
- nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
- }
-
- objID := obj.Address.Object()
- cnrID := obj.Address.Container()
- local := true
- raw := false
- if obj.ECInfo != nil {
- objID = obj.ECInfo.ParentID
- local = false
- raw = true
- }
- prm := clientSDK.PrmObjectHead{
- ObjectID: &objID,
- ContainerID: &cnrID,
- Local: local,
- Raw: raw,
- }
-
- var ni clientCore.NodeInfo
- for i := range nodes {
- for j := range nodes[i] {
- if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
- return statusUndefined, fmt.Errorf("parse node info: %w", err)
- }
- c, err := cc.Get(ni)
- if err != nil {
- continue
- }
- res, err := c.ObjectHead(ctx, prm)
- if err != nil {
- var errECInfo *objectSDK.ECInfoError
- if raw && errors.As(err, &errECInfo) {
- return statusFound, nil
- }
- continue
- }
- if err := apistatus.ErrFromStatus(res.Status()); err != nil {
- continue
- }
- return statusFound, nil
- }
- }
-
- if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
- return statusFound, nil
- }
- return statusQuarantine, nil
-}
-
-func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
- appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
-) error {
- cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
- DialTimeout: apiclientconfig.DialTimeout(appCfg),
- StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
- ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
- Key: pk,
- AllowExternal: apiclientconfig.AllowExternal(appCfg),
- })
- ctx := cmd.Context()
-
- var cursor *engine.Cursor
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var prm engine.ListWithCursorPrm
- prm.WithCursor(cursor)
- prm.WithCount(batchSize)
-
- res, err := storageEngine.ListWithCursor(ctx, prm)
- if err != nil {
- if errors.Is(err, engine.ErrEndOfListing) {
- return nil
- }
- return fmt.Errorf("list with cursor: %w", err)
- }
-
- cursor = res.Cursor()
- addrList := res.AddressList()
- eg, egCtx := errgroup.WithContext(ctx)
- eg.SetLimit(int(batchSize))
-
- for i := range addrList {
- addr := addrList[i]
- eg.Go(func() error {
- result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
- if err != nil {
- return fmt.Errorf("check object %s status: %w", addr.Address, err)
- }
- ps.add(result)
-
- if !move && result == statusQuarantine {
- cmd.Println(addr)
- return nil
- }
-
- if result == statusQuarantine {
- return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
- }
- return nil
- })
- }
- if err := eg.Wait(); err != nil {
- return fmt.Errorf("process objects batch: %w", err)
- }
- }
-}
-
-func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
- var getPrm engine.GetPrm
- getPrm.WithAddress(addr)
- res, err := storageEngine.Get(ctx, getPrm)
- if err != nil {
- return fmt.Errorf("get object %s from storage engine: %w", addr, err)
- }
-
- if err := q.Put(ctx, res.Object()); err != nil {
- return fmt.Errorf("put object %s to quarantine: %w", addr, err)
- }
-
- var delPrm engine.DeletePrm
- delPrm.WithForceRemoval()
- delPrm.WithAddress(addr)
-
- if err = storageEngine.Delete(ctx, delPrm); err != nil {
- return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
- }
- return nil
-}
-
-type processStatus struct {
- guard sync.RWMutex
- statusCount map[status]uint64
- count uint64
-}
-
-func (s *processStatus) add(st status) {
- s.guard.Lock()
- defer s.guard.Unlock()
- s.statusCount[st]++
- s.count++
-}
-
-func (s *processStatus) total() uint64 {
- s.guard.RLock()
- defer s.guard.RUnlock()
- return s.count
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
deleted file mode 100644
index 5be34d502..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package zombie
-
-import (
- "context"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
- shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
- blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
- fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/panjf2000/ants/v2"
- "github.com/spf13/cobra"
- "go.etcd.io/bbolt"
- "go.uber.org/zap"
-)
-
-func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
- ngOpts := storageEngineOptions(c)
- shardOpts := shardOptions(cmd, c)
- e := engine.New(ngOpts...)
- for _, opts := range shardOpts {
- _, err := e.AddShard(cmd.Context(), opts...)
- commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
- }
- commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
- commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
- return e
-}
-
-func storageEngineOptions(c *config.Config) []engine.Option {
- return []engine.Option{
- engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
- engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
- }
-}
-
-func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
- var result [][]shard.Option
- err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
- result = append(result, getShardOpts(cmd, c, sh))
- return nil
- })
- commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
- return result
-}
-
-func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
- wc, wcEnabled := getWriteCacheOpts(sh)
- return []shard.Option{
- shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- shard.WithRefillMetabase(sh.RefillMetabase()),
- shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
- shard.WithMode(sh.Mode()),
- shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
- shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
- shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
- shard.WithWriteCache(wcEnabled),
- shard.WithWriteCacheOptions(wc),
- shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
- shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
- shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
- shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
- return pool
- }),
- shard.WithLimiter(qos.NewNoopLimiter()),
- }
-}
-
-func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
- if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
- var result []writecache.Option
- result = append(result,
- writecache.WithPath(wc.Path()),
- writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
- writecache.WithMaxObjectSize(wc.MaxObjectSize()),
- writecache.WithFlushWorkersCount(wc.WorkerCount()),
- writecache.WithMaxCacheSize(wc.SizeLimit()),
- writecache.WithMaxCacheCount(wc.CountLimit()),
- writecache.WithNoSync(wc.NoSync()),
- writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- writecache.WithQoSLimiter(qos.NewNoopLimiter()),
- )
- return result, true
- }
- return nil, false
-}
-
-func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
- var piloramaOpts []pilorama.Option
- if config.BoolSafe(c.Sub("tree"), "enabled") {
- pr := sh.Pilorama()
- piloramaOpts = append(piloramaOpts,
- pilorama.WithPath(pr.Path()),
- pilorama.WithPerm(pr.Perm()),
- pilorama.WithNoSync(pr.NoSync()),
- pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
- pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
- )
- }
- return piloramaOpts
-}
-
-func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
- return []meta.Option{
- meta.WithPath(sh.Metabase().Path()),
- meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
- meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
- meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
- meta.WithBoltDBOptions(&bbolt.Options{
- Timeout: 100 * time.Millisecond,
- }),
- meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- meta.WithEpochState(&epochState{}),
- }
-}
-
-func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
- result := []blobstor.Option{
- blobstor.WithCompression(sh.Compression()),
- blobstor.WithStorages(getSubStorages(ctx, sh)),
- blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- }
-
- return result
-}
-
-func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
- var ss []blobstor.SubStorage
- for _, storage := range sh.BlobStor().Storages() {
- switch storage.Type() {
- case blobovniczatree.Type:
- sub := blobovniczaconfig.From((*config.Config)(storage))
- blobTreeOpts := []blobovniczatree.Option{
- blobovniczatree.WithRootPath(storage.Path()),
- blobovniczatree.WithPermissions(storage.Perm()),
- blobovniczatree.WithBlobovniczaSize(sub.Size()),
- blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
- blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
- blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
- blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
- blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
- blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
- blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
- blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())),
- blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())),
- blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
- }
-
- ss = append(ss, blobstor.SubStorage{
- Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
- Policy: func(_ *objectSDK.Object, data []byte) bool {
- return uint64(len(data)) < sh.SmallSizeLimit()
- },
- })
- case fstree.Type:
- sub := fstreeconfig.From((*config.Config)(storage))
- fstreeOpts := []fstree.Option{
- fstree.WithPath(storage.Path()),
- fstree.WithPerm(storage.Perm()),
- fstree.WithDepth(sub.Depth()),
- fstree.WithNoSync(sub.NoSync()),
- fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- }
-
- ss = append(ss, blobstor.SubStorage{
- Storage: fstree.New(fstreeOpts...),
- Policy: func(_ *objectSDK.Object, _ []byte) bool {
- return true
- },
- })
- default:
- // should never happen: unknown storage types
- // have already been rejected when the config was read
- }
- }
- return ss
-}
-
-type epochState struct{}
-
-func (epochState) CurrentEpoch() uint64 {
- return 0
-}
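
The deleted helper wires a no-op epochState into every metabase so the offline tooling never consults the chain. A minimal self-contained sketch of that contract, assuming (as the stub above suggests) that the expected interface is just CurrentEpoch() uint64:

package main

import "fmt"

// EpochState mirrors the one-method interface assumed by meta.WithEpochState.
type EpochState interface {
	CurrentEpoch() uint64
}

// fixedEpoch reports a constant epoch; offline tools can pin it to zero
// because expiration checks are meaningless outside a running node.
type fixedEpoch uint64

func (e fixedEpoch) CurrentEpoch() uint64 { return uint64(e) }

func main() {
	var es EpochState = fixedEpoch(0)
	fmt.Println(es.CurrentEpoch()) // 0
}
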
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index c0c290c5e..96cb62f10 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -1,7 +1,6 @@
package metabase
import (
- "context"
"errors"
"fmt"
"sync"
@@ -11,25 +10,19 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
- morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
const (
+ pathFlag = "path"
noCompactFlag = "no-compact"
)
-var (
- errNoPathsFound = errors.New("no metabase paths found")
- errNoMorphEndpointsFound = errors.New("no morph endpoints found")
- errUpgradeFailed = errors.New("upgrade failed")
-)
+var errNoPathsFound = errors.New("no metabase paths found")
+
+var path string
var UpgradeCmd = &cobra.Command{
Use: "upgrade",
@@ -46,10 +39,17 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
+ noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
+ var paths []string
+ if path != "" {
+ paths = append(paths, path)
+ }
appCfg := config.New(configFile, configDir, config.EnvPrefix)
- paths, err := getMetabasePaths(appCfg)
- if err != nil {
- return err
+ if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
+ paths = append(paths, sc.Metabase().Path())
+ return nil
+ }); err != nil {
+ return fmt.Errorf("failed to get metabase paths: %w", err)
}
if len(paths) == 0 {
return errNoPathsFound
@@ -58,16 +58,6 @@ func upgrade(cmd *cobra.Command, _ []string) error {
for i, path := range paths {
cmd.Println(i+1, ":", path)
}
- mc, err := createMorphClient(cmd.Context(), appCfg)
- if err != nil {
- return err
- }
- defer mc.Close()
- civ, err := createContainerInfoProvider(mc)
- if err != nil {
- return err
- }
- noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
result := make(map[string]bool)
var resultGuard sync.Mutex
eg, ctx := errgroup.WithContext(cmd.Context())
@@ -75,7 +65,7 @@ func upgrade(cmd *cobra.Command, _ []string) error {
eg.Go(func() error {
var success bool
cmd.Println("upgrading metabase", path, "...")
- if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) {
+ if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
}); err != nil {
cmd.Println("error: failed to upgrade metabase", path, ":", err)
@@ -92,65 +82,18 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil {
return err
}
- allSuccess := true
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
- allSuccess = false
}
}
- if allSuccess {
- return nil
- }
- return errUpgradeFailed
-}
-
-func getMetabasePaths(appCfg *config.Config) ([]string, error) {
- var paths []string
- if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
- paths = append(paths, sc.Metabase().Path())
- return nil
- }); err != nil {
- return nil, fmt.Errorf("get metabase paths: %w", err)
- }
- return paths, nil
-}
-
-func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) {
- addresses := morphconfig.RPCEndpoint(appCfg)
- if len(addresses) == 0 {
- return nil, errNoMorphEndpointsFound
- }
- key := nodeconfig.Key(appCfg)
- cli, err := client.New(ctx,
- key,
- client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
- client.WithEndpoints(addresses...),
- client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
- )
- if err != nil {
- return nil, fmt.Errorf("create morph client:%w", err)
- }
- return cli, nil
-}
-
-func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) {
- sh, err := cli.NNSContractAddress(client.NNSContainerContractName)
- if err != nil {
- return nil, fmt.Errorf("resolve container contract hash: %w", err)
- }
- cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
- if err != nil {
- return nil, fmt.Errorf("create morph container client: %w", err)
- }
- return container.NewInfoProvider(func() (container.Source, error) {
- return morphcontainer.AsContainerSource(cc), nil
- }), nil
+ return nil
}
func initUpgradeCommand() {
flags := UpgradeCmd.Flags()
+ flags.StringVar(&path, pathFlag, "", "Path to metabase file")
flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
}
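
The rewritten upgrade loop keeps the same concurrency shape: one errgroup goroutine per metabase path, a mutex-guarded result map, and a shared context that cancels remaining work on first error. The pattern in isolation (upgradeOne stands in for meta.Upgrade):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func upgradeOne(ctx context.Context, path string) bool { return true } // placeholder

func main() {
	paths := []string{"/srv/meta1.db", "/srv/meta2.db"} // placeholder paths
	result := make(map[string]bool)
	var mu sync.Mutex

	eg, ctx := errgroup.WithContext(context.Background())
	for _, path := range paths {
		// Go 1.22 per-iteration loop variables make capturing path safe.
		eg.Go(func() error {
			ok := upgradeOne(ctx, path)
			mu.Lock()
			result[path] = ok
			mu.Unlock()
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("upgrade aborted:", err)
	}
	fmt.Println(result)
}
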
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
index 1960faab4..077e03737 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
@@ -5,19 +5,35 @@ import (
"encoding/json"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
- jsonFlag = "json"
- jsonFlagDesc = "Output rule chains in JSON format"
- addrAdminFlag = "addr"
- addrAdminDesc = "The address of the admins wallet"
+ namespaceTarget = "namespace"
+ containerTarget = "container"
+ userTarget = "user"
+ groupTarget = "group"
+ jsonFlag = "json"
+ jsonFlagDesc = "Output rule chains in JSON format"
+ chainIDFlag = "chain-id"
+ chainIDDesc = "Rule chain ID"
+ ruleFlag = "rule"
+ ruleFlagDesc = "Rule chain in text format"
+ pathFlag = "path"
+ pathFlagDesc = "Path to encoded chain in JSON or binary format"
+ targetNameFlag = "target-name"
+ targetNameDesc = "Resource name in APE resource name format"
+ targetTypeFlag = "target-type"
+ targetTypeDesc = "Resource type (container/namespace)"
+ addrAdminFlag = "addr"
+ addrAdminDesc = "The address of the admins wallet"
+ chainNameFlag = "chain-name"
+ chainNameFlagDesc = "Chain name (ingress|s3)"
)
var (
@@ -85,17 +101,17 @@ func initAddRuleChainCmd() {
addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
+ addRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(targetTypeFlag)
+ addRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(targetNameFlag)
- addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
- addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
- addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag)
+ addRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
+ _ = addRuleChainCmd.MarkFlagRequired(chainIDFlag)
+ addRuleChainCmd.Flags().StringArray(ruleFlag, []string{}, ruleFlagDesc)
+ addRuleChainCmd.Flags().String(pathFlag, "", pathFlagDesc)
+ addRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
+ addRuleChainCmd.MarkFlagsMutuallyExclusive(ruleFlag, pathFlag)
}
func initRemoveRuleChainCmd() {
@@ -104,25 +120,26 @@ func initRemoveRuleChainCmd() {
removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
- removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
+ removeRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(targetTypeFlag)
+ removeRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc)
+ _ = removeRuleChainCmd.MarkFlagRequired(targetNameFlag)
+ removeRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc)
+ removeRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target")
- removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag)
+ removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, chainIDFlag)
}
func initListRuleChainsCmd() {
Cmd.AddCommand(listRuleChainsCmd)
listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
- _ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
+ listRuleChainsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
+ _ = listRuleChainsCmd.MarkFlagRequired(targetTypeFlag)
+ listRuleChainsCmd.Flags().String(targetNameFlag, "", targetNameDesc)
+ _ = listRuleChainsCmd.MarkFlagRequired(targetNameFlag)
listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc)
- listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
+ listRuleChainsCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc)
}
func initSetAdminCmd() {
@@ -144,15 +161,15 @@ func initListTargetsCmd() {
Cmd.AddCommand(listTargetsCmd)
listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
- _ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ listTargetsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc)
+ _ = listTargetsCmd.MarkFlagRequired(targetTypeFlag)
}
func addRuleChain(cmd *cobra.Command, _ []string) {
- chain := apeCmd.ParseChain(cmd)
+ chain := parseChain(cmd)
target := parseTarget(cmd)
pci, ac := newPolicyContractInterface(cmd)
- h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain)
+ h, vub, err := pci.AddMorphRuleChain(parseChainName(cmd), target, chain)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err)
@@ -164,14 +181,14 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
pci, ac := newPolicyContractInterface(cmd)
removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag)
if removeAll {
- h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target)
+ h, vub, err := pci.RemoveMorphRuleChainsByTarget(parseChainName(cmd), target)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
cmd.Println("All chains for target removed successfully")
} else {
- chainID := apeCmd.ParseChainID(cmd)
- h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID)
+ chainID := parseChainID(cmd)
+ h, vub, err := pci.RemoveMorphRuleChain(parseChainName(cmd), target, chainID)
cmd.Println("Waiting for transaction to persist...")
_, err = ac.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
@@ -182,7 +199,7 @@ func removeRuleChain(cmd *cobra.Command, _ []string) {
func listRuleChains(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
pci, _ := newPolicyContractReaderInterface(cmd)
- chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target)
+ chains, err := pci.ListMorphRuleChains(parseChainName(cmd), target)
commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err)
if len(chains) == 0 {
return
@@ -193,14 +210,14 @@ func listRuleChains(cmd *cobra.Command, _ []string) {
prettyJSONFormat(cmd, chains)
} else {
for _, c := range chains {
- apeCmd.PrintHumanReadableAPEChain(cmd, c)
+ parseutil.PrintHumanReadableAPEChain(cmd, c)
}
}
}
func setAdmin(cmd *cobra.Command, _ []string) {
s, _ := cmd.Flags().GetString(addrAdminFlag)
- addr, err := address.StringToUint160(s)
+ addr, err := util.Uint160DecodeStringLE(s)
commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
pci, ac := newPolicyContractInterface(cmd)
h, vub, err := pci.SetAdmin(addr)
@@ -214,11 +231,12 @@ func getAdmin(cmd *cobra.Command, _ []string) {
pci, _ := newPolicyContractReaderInterface(cmd)
addr, err := pci.GetAdmin()
commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
- cmd.Println(address.Uint160ToString(addr))
+ cmd.Println(addr.StringLE())
}
func listTargets(cmd *cobra.Command, _ []string) {
- typ := apeCmd.ParseTargetType(cmd)
+ typ, err := parseTargetType(cmd)
+ commonCmd.ExitOnErr(cmd, "parse target type error: %w", err)
pci, inv := newPolicyContractReaderInterface(cmd)
sid, it, err := pci.ListTargetsIterator(typ)
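
All of the init*Cmd helpers above follow the same cobra idiom: declare flags on the command, mark the mandatory ones required, and declare conflicting pairs mutually exclusive so cobra rejects bad invocations before Run fires. A compact sketch with illustrative flag names and rule text:

package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, _ []string) {
			rules, _ := cmd.Flags().GetStringArray("rule")
			cmd.Println("rules:", rules)
		},
	}
	cmd.Flags().String("target-type", "", "Resource type (container/namespace)")
	_ = cmd.MarkFlagRequired("target-type")
	cmd.Flags().StringArray("rule", nil, "Rule chain in text format")
	cmd.Flags().String("path", "", "Path to encoded chain")
	// A chain comes either inline or from a file, never both.
	cmd.MarkFlagsMutuallyExclusive("rule", "path")

	cmd.SetArgs([]string{"--target-type", "container", "--rule", "allow Object.Get *"})
	_ = cmd.Execute()
}
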
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
index 3c332c3f0..d4aedda2e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
@@ -2,14 +2,13 @@ package ape
import (
"errors"
+ "strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
+ parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -19,29 +18,90 @@ import (
"github.com/spf13/viper"
)
-var errUnknownTargetType = errors.New("unknown target type")
+const (
+ ingress = "ingress"
+ s3 = "s3"
+)
+
+var mChainName = map[string]apechain.Name{
+ ingress: apechain.Ingress,
+ s3: apechain.S3,
+}
+
+var (
+ errUnknownTargetType = errors.New("unknown target type")
+ errChainIDCannotBeEmpty = errors.New("chain id cannot be empty")
+ errRuleIsNotParsed = errors.New("rule is not passed")
+ errUnsupportedChainName = errors.New("unsupported chain name")
+)
func parseTarget(cmd *cobra.Command) policyengine.Target {
- typ := apeCmd.ParseTargetType(cmd)
- name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag)
- switch typ {
- case policyengine.Namespace:
- if name == "root" {
- name = ""
- }
- return policyengine.NamespaceTarget(name)
- case policyengine.Container:
- var cnr cid.ID
- commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
- return policyengine.ContainerTarget(name)
- case policyengine.User:
- return policyengine.UserTarget(name)
- case policyengine.Group:
- return policyengine.GroupTarget(name)
- default:
- commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
+ name, _ := cmd.Flags().GetString(targetNameFlag)
+ typ, err := parseTargetType(cmd)
+
+ // interpret "root" namespace as empty
+ if typ == policyengine.Namespace && name == "root" {
+ name = ""
}
- panic("unreachable")
+
+ commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
+
+ return policyengine.Target{
+ Name: name,
+ Type: typ,
+ }
+}
+
+func parseTargetType(cmd *cobra.Command) (policyengine.TargetType, error) {
+ typ, _ := cmd.Flags().GetString(targetTypeFlag)
+ switch typ {
+ case namespaceTarget:
+ return policyengine.Namespace, nil
+ case containerTarget:
+ return policyengine.Container, nil
+ case userTarget:
+ return policyengine.User, nil
+ case groupTarget:
+ return policyengine.Group, nil
+ }
+ return -1, errUnknownTargetType
+}
+
+func parseChainID(cmd *cobra.Command) apechain.ID {
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ if chainID == "" {
+ commonCmd.ExitOnErr(cmd, "read chain id error: %w",
+ errChainIDCannotBeEmpty)
+ }
+ return apechain.ID(chainID)
+}
+
+func parseChain(cmd *cobra.Command) *apechain.Chain {
+ chain := new(apechain.Chain)
+
+ if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules))
+ } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
+ commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath))
+ } else {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
+ }
+
+ chain.ID = parseChainID(cmd)
+
+ cmd.Println("Parsed chain:")
+ parseutil.PrintHumanReadableAPEChain(cmd, chain)
+
+ return chain
+}
+
+func parseChainName(cmd *cobra.Command) apechain.Name {
+ chainName, _ := cmd.Flags().GetString(chainNameFlag)
+ apeChainName, ok := mChainName[strings.ToLower(chainName)]
+ if !ok {
+ commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
+ }
+ return apeChainName
}
// invokerAdapter adapts invoker.Invoker to ContractStorageInvoker interface.
@@ -55,15 +115,16 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
}
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
inv := invoker.New(c, nil)
+ var ch util.Uint160
r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
- ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
+ ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
invokerAdapter := &invokerAdapter{
@@ -75,11 +136,10 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
}
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
- walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
- ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
+ ac, err := helper.NewLocalActor(cmd, c)
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
var ch util.Uint160
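
parseChainName resolves user input through a fixed lookup table after case-folding, so "Ingress" and "ingress" behave identically while anything else fails fast. The lookup in isolation (string values stand in for the apechain.Name constants):

package main

import (
	"errors"
	"fmt"
	"strings"
)

var chainNames = map[string]string{ // stand-in for map[string]apechain.Name
	"ingress": "Ingress",
	"s3":      "S3",
}

func resolveChainName(s string) (string, error) {
	name, ok := chainNames[strings.ToLower(s)]
	if !ok {
		return "", errors.New("unsupported chain name")
	}
	return name, nil
}

func main() {
	fmt.Println(resolveChainName("Ingress")) // Ingress <nil>
	fmt.Println(resolveChainName("http"))    // error
}
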
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
index 23dba14f4..5519705d4 100644
--- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
+++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -52,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
nmHash util.Uint160
)
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return err
}
@@ -162,7 +161,9 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
}
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(w.Err)
+ }
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
if err != nil {
@@ -225,7 +226,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
for i := range accounts {
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
}
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(w.Err)
+ }
res, err := c.Run(w.Bytes())
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
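
Both hunks replace the shared assert helper with an explicit check of the BufBinWriter's sticky error before the script is sent. Emit errors indicate programming bugs rather than runtime conditions, so panicking is appropriate; if the check recurs often, a local helper keeps call sites short (a sketch, not an existing API):

package main

import "fmt"

// mustOK panics with context when script emission failed; emit errors are
// programmer mistakes, so failing loudly beats propagating them.
func mustOK(err error, what string) {
	if err != nil {
		panic(fmt.Errorf("BUG: %s: %w", what, err))
	}
}

type scriptWriter struct{ Err error } // stand-in for io.BufBinWriter

func main() {
	w := &scriptWriter{}
	// ... emit calls would set w.Err on failure ...
	mustOK(w.Err, "can't create balance script")
	fmt.Println("script assembled")
}
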
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go
index c17fb62ff..3a7f84acb 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go
@@ -26,7 +26,7 @@ import (
const forceConfigSet = "force"
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
nbuf := make([]byte, 8)
- copy(nbuf, v)
+ copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return helper.InvalidConfigValueErr(k)
}
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
default:
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
}
}
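
The two output forms swapped in this hunk are behaviorally identical: fmt.Appendf(nil, ...) formats straight into a fresh byte slice, while []byte(fmt.Sprintf(...)) detours through a string. Both compile on Go 1.22 (fmt.Appendf has existed since Go 1.19), so the change is purely stylistic:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := fmt.Appendf(nil, "%s:\t%d (int)\n", "MaxObjectSize", 67108864)
	b := []byte(fmt.Sprintf("%s:\t%d (int)\n", "MaxObjectSize", 67108864))
	fmt.Println(bytes.Equal(a, b)) // true
}
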
diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
index be4041a86..a3b4f129a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go
+++ b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
@@ -4,6 +4,7 @@ import "time"
const (
ConsensusAccountName = "consensus"
+ ProtoConfigPath = "protocol"
// MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
// of the invocation script.
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index 79685f111..a66438975 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -77,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err)
}
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -140,12 +139,13 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) {
bw.Reset()
emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
+ emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
res, err := inv.Run(bw.Bytes())
if err != nil {
return nil, fmt.Errorf("can't get container info: %w", err)
}
- if len(res.Stack) != 1 {
- return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse)
+ if len(res.Stack) != 2 {
+ return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
}
cnt := new(Container)
@@ -154,11 +154,19 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
+ ea := new(EACL)
+ err = ea.FromStackItem(res.Stack[1])
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ }
+ if len(ea.Value) != 0 {
+ cnt.EACL = ea
+ }
return cnt, nil
}
func listContainers(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -236,7 +244,9 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
putContainer(bw, ch, cnt)
- assert.NoError(bw.Err)
+ if bw.Err != nil {
+ panic(bw.Err)
+ }
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err
@@ -248,6 +258,10 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) {
emit.AppCall(bw.BinWriter, ch, "put", callflag.All,
cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token)
+ if ea := cnt.EACL; ea != nil {
+ emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All,
+ ea.Value, ea.Signature, ea.PublicKey, ea.Token)
+ }
}
func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) {
@@ -308,6 +322,15 @@ type Container struct {
Signature []byte `json:"signature"`
PublicKey []byte `json:"public_key"`
Token []byte `json:"token"`
+ EACL *EACL `json:"eacl"`
+}
+
+// EACL represents extended ACL struct in contract storage.
+type EACL struct {
+ Value []byte `json:"value"`
+ Signature []byte `json:"signature"`
+ PublicKey []byte `json:"public_key"`
+ Token []byte `json:"token"`
}
// ToStackItem implements stackitem.Convertible.
@@ -354,6 +377,50 @@ func (c *Container) FromStackItem(item stackitem.Item) error {
return nil
}
+// ToStackItem implements stackitem.Convertible.
+func (c *EACL) ToStackItem() (stackitem.Item, error) {
+ return stackitem.NewStruct([]stackitem.Item{
+ stackitem.NewByteArray(c.Value),
+ stackitem.NewByteArray(c.Signature),
+ stackitem.NewByteArray(c.PublicKey),
+ stackitem.NewByteArray(c.Token),
+ }), nil
+}
+
+// FromStackItem implements stackitem.Convertible.
+func (c *EACL) FromStackItem(item stackitem.Item) error {
+ arr, ok := item.Value().([]stackitem.Item)
+ if !ok || len(arr) != 4 {
+ return errors.New("invalid stack item type")
+ }
+
+ value, err := arr[0].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL value")
+ }
+
+ sig, err := arr[1].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL signature")
+ }
+
+ pub, err := arr[2].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL public key")
+ }
+
+ tok, err := arr[3].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL token")
+ }
+
+ c.Value = value
+ c.Signature = sig
+ c.PublicKey = pub
+ c.Token = tok
+ return nil
+}
+
// getCIDFilterFunc returns filtering function for container IDs.
// Raw byte slices are used because it works with structures returned
// from contract.
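
EACL mirrors Container's stackitem round-trip: ToStackItem packs the four byte fields into a struct item, and FromStackItem validates arity and field types on the way back. A self-contained check of the decode half, using only the stackitem calls already present in the hunk:

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

func main() {
	item := stackitem.NewStruct([]stackitem.Item{
		stackitem.NewByteArray([]byte("eacl-value")),
		stackitem.NewByteArray([]byte("sig")),
		stackitem.NewByteArray([]byte("pub")),
		stackitem.NewByteArray([]byte("token")),
	})
	arr, ok := item.Value().([]stackitem.Item)
	fmt.Println(ok, len(arr) == 4) // true true

	value, err := arr[0].TryBytes()
	fmt.Println(string(value), err) // eacl-value <nil>
}
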
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
index 543b5fcb3..5adb480da 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/cli/cmdargs"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@@ -121,7 +120,9 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
}
- assert.NoError(writer.Err, "can't create deployment script")
+ if writer.Err != nil {
+ panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+ }
if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
return err
@@ -172,8 +173,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
}
- assert.NoError(bw.Err, "can't create deployment script")
- if bw.Len() != start {
+ if bw.Err != nil {
+ panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+ } else if bw.Len() != start {
writer.WriteBytes(bw.Bytes())
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
index fde58fd2b..be2134b77 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
@@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -37,7 +36,7 @@ type contractDumpInfo struct {
}
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@@ -220,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
- _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
- info.name, info.version, info.hash.StringLE()))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
+ info.name, info.version, info.hash.StringLE())))
}
_ = tw.Flush()
@@ -237,17 +236,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
- assert.NoError(sub.Err, "can't create version script")
+ if sub.Err != nil {
+ panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+ }
script := sub.Bytes()
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
- bw.WriteBytes(script)
+ bw.BinWriter.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
- assert.NoError(bw.Err, "can't create version script")
+ if bw.Err != nil {
+ panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+ }
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
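
The TRY/ENDTRY wrapper above keeps the batched dump alive when a contract lacks a version method: an aborting call jumps to the catch block, which pushes 0 as a placeholder version. The operand arithmetic with a one-byte stand-in for the version call (3 bytes of TRY plus the body plus 2 bytes of ENDTRY reach the catch; 0 means no finally block):

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)

func main() {
	bw := io.NewBufBinWriter()
	script := []byte{byte(opcode.PUSH1)} // stand-in for the emitted "version" call

	emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
	bw.BinWriter.WriteBytes(script)
	// ENDTRY skips the 1-byte catch body (PUSH0) relative to its own 2 bytes.
	emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
	emit.Opcodes(bw.BinWriter, opcode.PUSH0) // catch: placeholder version
	emit.Opcodes(bw.BinWriter, opcode.NOP)   // ENDTRY jump target
	if bw.Err != nil {
		panic(bw.Err)
	}
	fmt.Printf("% x\n", bw.Bytes())
}
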
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
deleted file mode 100644
index 4046e85e3..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package frostfsid
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- frostfsidAddSubjectKeyCmd = &cobra.Command{
- Use: "add-subject-key",
- Short: "Add a public key to the subject in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidAddSubjectKey,
- }
- frostfsidRemoveSubjectKeyCmd = &cobra.Command{
- Use: "remove-subject-key",
- Short: "Remove a public key from the subject in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidRemoveSubjectKey,
- }
-)
-
-func initFrostfsIDAddSubjectKeyCmd() {
- Cmd.AddCommand(frostfsidAddSubjectKeyCmd)
-
- ff := frostfsidAddSubjectKeyCmd.Flags()
- ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- ff.String(subjectAddressFlag, "", "Subject address")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
- ff.String(subjectKeyFlag, "", "Public key to add")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func initFrostfsIDRemoveSubjectKeyCmd() {
- Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd)
-
- ff := frostfsidRemoveSubjectKeyCmd.Flags()
- ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- ff.String(subjectAddressFlag, "", "Subject address")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
- ff.String(subjectKeyFlag, "", "Public key to remove")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) {
- addr := getFrostfsIDSubjectAddress(cmd)
- pub := getFrostfsIDSubjectKey(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "add subject key: %w", err)
-}
-
-func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) {
- addr := getFrostfsIDSubjectAddress(cmd)
- pub := getFrostfsIDSubjectKey(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "remove subject key: %w", err)
-}
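
Like every other subcommand in this module, the deleted pair binds its shared flags to viper in PreRun rather than at init time, because flag values are final only after cobra has parsed the command line. The binding idiom in isolation:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		PreRun: func(cmd *cobra.Command, _ []string) {
			// Late binding: parsed flag values win over config-file defaults.
			_ = viper.BindPFlag("rpc-endpoint", cmd.Flags().Lookup("rpc-endpoint"))
		},
		Run: func(cmd *cobra.Command, _ []string) {
			fmt.Println("endpoint:", viper.GetString("rpc-endpoint"))
		},
	}
	cmd.Flags().StringP("rpc-endpoint", "r", "", "N3 RPC endpoint")
	cmd.SetArgs([]string{"--rpc-endpoint", "http://localhost:30333"})
	_ = cmd.Execute()
}
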
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
index 7f777db98..091d6634a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
@@ -1,7 +1,6 @@
package frostfsid
import (
- "encoding/hex"
"errors"
"fmt"
"math/big"
@@ -35,16 +34,11 @@ const (
subjectNameFlag = "subject-name"
subjectKeyFlag = "subject-key"
subjectAddressFlag = "subject-address"
- extendedFlag = "extended"
+ includeNamesFlag = "include-names"
groupNameFlag = "group-name"
groupIDFlag = "group-id"
rootNamespacePlaceholder = ""
-
- keyFlag = "key"
- keyDescFlag = "Key for storing a value in the subject's KV storage"
- valueFlag = "value"
- valueDescFlag = "Value to be stored in the subject's KV storage"
)
var (
@@ -67,6 +61,7 @@ var (
Use: "list-namespaces",
Short: "List all namespaces in frostfsid",
PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListNamespaces,
@@ -96,6 +91,7 @@ var (
Use: "list-subjects",
Short: "List subjects in namespace",
PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListSubjects,
@@ -125,6 +121,7 @@ var (
Use: "list-groups",
Short: "List groups in namespace",
PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListGroups,
@@ -154,27 +151,11 @@ var (
Use: "list-group-subjects",
Short: "List subjects in group",
PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidListGroupSubjects,
}
-
- frostfsidSetKVCmd = &cobra.Command{
- Use: "set-kv",
- Short: "Store a key-value pair in the subject's KV storage",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidSetKV,
- }
- frostfsidDeleteKVCmd = &cobra.Command{
- Use: "delete-kv",
- Short: "Delete a value from the subject's KV storage",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidDeleteKV,
- }
)
func initFrostfsIDCreateNamespaceCmd() {
@@ -188,6 +169,7 @@ func initFrostfsIDCreateNamespaceCmd() {
func initFrostfsIDListNamespacesCmd() {
Cmd.AddCommand(frostfsidListNamespacesCmd)
frostfsidListNamespacesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ frostfsidListNamespacesCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initFrostfsIDCreateSubjectCmd() {
@@ -210,7 +192,8 @@ func initFrostfsIDListSubjectsCmd() {
Cmd.AddCommand(frostfsidListSubjectsCmd)
frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
- frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
+ frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether to include subject names (requires additional requests)")
+ frostfsidListSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initFrostfsIDCreateGroupCmd() {
@@ -234,6 +217,7 @@ func initFrostfsIDListGroupsCmd() {
Cmd.AddCommand(frostfsidListGroupsCmd)
frostfsidListGroupsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListGroupsCmd.Flags().String(namespaceFlag, "", "Namespace to list groups")
+ frostfsidListGroupsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initFrostfsIDAddSubjectToGroupCmd() {
@@ -257,22 +241,8 @@ func initFrostfsIDListGroupSubjectsCmd() {
frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
- frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
-}
-
-func initFrostfsIDSetKVCmd() {
- Cmd.AddCommand(frostfsidSetKVCmd)
- frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
- frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
- frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
-}
-
-func initFrostfsIDDeleteKVCmd() {
- Cmd.AddCommand(frostfsidDeleteKVCmd)
- frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
- frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
+ frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether to include subject names (requires additional requests)")
+ frostfsidListGroupSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
@@ -292,7 +262,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaces()
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
- items, err := readIterator(inv, &it, sessionID)
+ items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
namespaces, err := frostfsidclient.ParseNamespaces(items)
@@ -337,32 +307,34 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
}
func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
- extended, _ := cmd.Flags().GetBool(extendedFlag)
+ includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
ns := getFrostfsIDNamespace(cmd)
inv, _, hash := initInvoker(cmd)
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
- subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
+ subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
for _, addr := range subAddresses {
- if !extended {
+ if !includeNames {
cmd.Println(address.Uint160ToString(addr))
continue
}
- items, err := reader.GetSubject(addr)
+ sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
+ items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
+ commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
+
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
- printSubjectInfo(cmd, addr, subj)
- cmd.Println()
+ cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
}
}
@@ -402,7 +374,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
- items, err := readIterator(inv, &it, sessionID)
+ items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
groups, err := frostfsidclient.ParseGroups(items)
commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@@ -440,49 +412,10 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
}
-func frostfsidSetKV(cmd *cobra.Command, _ []string) {
- subjectAddress := getFrostfsIDSubjectAddress(cmd)
- key, _ := cmd.Flags().GetString(keyFlag)
- value, _ := cmd.Flags().GetString(valueFlag)
-
- if key == "" {
- commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
- }
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)
-
- ffsid.addCall(method, args)
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "set KV: %w", err)
-}
-
-func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
- subjectAddress := getFrostfsIDSubjectAddress(cmd)
- key, _ := cmd.Flags().GetString(keyFlag)
-
- if key == "" {
- commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
- }
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)
-
- ffsid.addCall(method, args)
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
-}
-
func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd)
groupID := getFrostfsIDGroupID(cmd)
- extended, _ := cmd.Flags().GetBool(extendedFlag)
+ includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
inv, cs, hash := initInvoker(cmd)
_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
@@ -491,7 +424,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
- items, err := readIterator(inv, &it, sessionID)
+ items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@@ -500,7 +433,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })
for _, subjAddr := range subjects {
- if !extended {
+ if !includeNames {
cmd.Println(address.Uint160ToString(subjAddr))
continue
}
@@ -509,8 +442,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
- printSubjectInfo(cmd, subjAddr, subj)
- cmd.Println()
+ cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
}
}
@@ -565,28 +497,32 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
}
f.bw.Reset()
+ if len(f.wCtx.SentTxs) == 0 {
+ return nil, errors.New("no transactions to wait")
+ }
+
f.wCtx.Command.Println("Waiting for transactions to persist...")
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
}
-func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
+func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
var shouldStop bool
res := make([]stackitem.Item, 0)
for !shouldStop {
- items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
+ items, err := inv.TraverseIterator(sessionID, iter, batchSize)
if err != nil {
return nil, err
}
res = append(res, items...)
- shouldStop = len(items) < iteratorBatchSize
+ shouldStop = len(items) < batchSize
}
return res, nil
}
func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)
@@ -600,30 +536,3 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui
return inv, cs, nmHash
}
-
-func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
- cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
- pk := ""
- if subj.PrimaryKey != nil {
- pk = hex.EncodeToString(subj.PrimaryKey.Bytes())
- }
- cmd.Printf("Primary key: %s\n", pk)
- cmd.Printf("Name: %s\n", subj.Name)
- cmd.Printf("Namespace: %s\n", subj.Namespace)
- if len(subj.AdditionalKeys) > 0 {
- cmd.Printf("Additional keys:\n")
- for _, key := range subj.AdditionalKeys {
- k := ""
- if key != nil {
- k = hex.EncodeToString(key.Bytes())
- }
- cmd.Printf("- %s\n", k)
- }
- }
- if len(subj.KV) > 0 {
- cmd.Printf("KV:\n")
- for k, v := range subj.KV {
- cmd.Printf("- %s: %s\n", k, v)
- }
- }
-}
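
readIterator drains a server-side iterator session in fixed-size pages and stops at the first short page, which signals exhaustion. The control flow detached from the RPC types (fetch stands in for inv.TraverseIterator):

package main

import "fmt"

// readAll keeps requesting pages of at most batch items until a short page
// arrives, mirroring the shouldStop logic above.
func readAll(fetch func(max int) []int, batch int) []int {
	var res []int
	for {
		page := fetch(batch)
		res = append(res, page...)
		if len(page) < batch {
			return res
		}
	}
}

func main() {
	data := make([]int, 25)
	off := 0
	fetch := func(max int) []int {
		n := min(max, len(data)-off)
		page := data[off : off+n]
		off += n
		return page
	}
	fmt.Println(len(readAll(fetch, 10))) // 25
}
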
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
index 1d0bc8441..cce859d2f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
@@ -1,12 +1,59 @@
package frostfsid
import (
+ "encoding/hex"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/spf13/viper"
"github.com/stretchr/testify/require"
)
+func TestFrostfsIDConfig(t *testing.T) {
+ pks := make([]*keys.PrivateKey, 4)
+ for i := range pks {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ pks[i] = pk
+ }
+
+ fmts := []string{
+ pks[0].GetScriptHash().StringLE(),
+ address.Uint160ToString(pks[1].GetScriptHash()),
+ hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
+ hex.EncodeToString(pks[3].PublicKey().Bytes()),
+ }
+
+ for i := range fmts {
+ v := viper.New()
+ v.Set("frostfsid.admin", fmts[i])
+
+ actual, found, err := helper.GetFrostfsIDAdmin(v)
+ require.NoError(t, err)
+ require.True(t, found)
+ require.Equal(t, pks[i].GetScriptHash(), actual)
+ }
+
+ t.Run("bad key", func(t *testing.T) {
+ v := viper.New()
+ v.Set("frostfsid.admin", "abc")
+
+ _, found, err := helper.GetFrostfsIDAdmin(v)
+ require.Error(t, err)
+ require.True(t, found)
+ })
+ t.Run("missing key", func(t *testing.T) {
+ v := viper.New()
+
+ _, found, err := helper.GetFrostfsIDAdmin(v)
+ require.NoError(t, err)
+ require.False(t, found)
+ })
+}
+
func TestNamespaceRegexp(t *testing.T) {
for _, tc := range []struct {
name string
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
index 8aad5c5c1..850474794 100644
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
@@ -12,8 +12,4 @@ func init() {
initFrostfsIDAddSubjectToGroupCmd()
initFrostfsIDRemoveSubjectFromGroupCmd()
initFrostfsIDListGroupSubjectsCmd()
- initFrostfsIDSetKVCmd()
- initFrostfsIDDeleteKVCmd()
- initFrostfsIDAddSubjectKeyCmd()
- initFrostfsIDRemoveSubjectKeyCmd()
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
index 78f8617f1..7af776797 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
@@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
@@ -140,29 +141,60 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
}
func generateStorageCreds(cmd *cobra.Command, _ []string) error {
- walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
- w, err := wallet.NewWallet(walletPath)
- if err != nil {
- return fmt.Errorf("create wallet: %w", err)
- }
-
- label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
- password, err := config.GetStoragePassword(viper.GetViper(), label)
- if err != nil {
- return fmt.Errorf("can't fetch password: %w", err)
- }
-
- if label == "" {
- label = constants.SingleAccountName
- }
-
- if err := w.CreateAccount(label, password); err != nil {
- return fmt.Errorf("can't create account: %w", err)
- }
- return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
+ return refillGas(cmd, storageGasConfigFlag, true)
}
-func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
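+// refillGas transfers the configured amount of GAS from the committee account
+// to a single receiver, resolved either from the wallet address flag or from
+// the storage node wallet (created on the fly when createWallet is set).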
+func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
+ // storage wallet path is not part of the config
+ storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
+ // wallet address is not part of the config
+ walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
+
+ var gasReceiver util.Uint160
+
+ if len(walletAddress) != 0 {
+ gasReceiver, err = address.StringToUint160(walletAddress)
+ if err != nil {
+ return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
+ }
+ } else {
+ if storageWalletPath == "" {
+ return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
+ }
+
+ var w *wallet.Wallet
+
+ if createWallet {
+ w, err = wallet.NewWallet(storageWalletPath)
+ } else {
+ w, err = wallet.NewWalletFromFile(storageWalletPath)
+ }
+
+ if err != nil {
+ return fmt.Errorf("can't create wallet: %w", err)
+ }
+
+ if createWallet {
+ var password string
+
+ label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
+ password, err = config.GetStoragePassword(viper.GetViper(), label)
+ if err != nil {
+ return fmt.Errorf("can't fetch password: %w", err)
+ }
+
+ if label == "" {
+ label = constants.SingleAccountName
+ }
+
+ if err := w.CreateAccount(label, password); err != nil {
+ return fmt.Errorf("can't create account: %w", err)
+ }
+ }
+
+ gasReceiver = w.Accounts[0].Contract.ScriptHash()
+ }
+
gasStr := viper.GetString(gasFlag)
gasAmount, err := helper.ParseGASAmount(gasStr)
@@ -176,11 +208,9 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160)
}
bw := io.NewBufBinWriter()
- for _, gasReceiver := range gasReceivers {
- emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
- wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
- emit.Opcodes(bw.BinWriter, opcode.ASSERT)
- }
+ emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
+ wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
+ emit.Opcodes(bw.BinWriter, opcode.ASSERT)
if bw.Err != nil {
return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
index 15af5637b..1dd6420eb 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
@@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) {
buf.Reset()
v.Set(commonflags.AlphabetWalletsFlag, walletDir)
require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
- for i := range uint64(size) {
+ for i := uint64(0); i < size; i++ {
buf.WriteString(strconv.FormatUint(i, 10) + "\r")
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go
index 73c986713..3633d9a8e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/root.go
@@ -1,12 +1,7 @@
package generate
import (
- "fmt"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -38,27 +33,7 @@ var (
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
},
RunE: func(cmd *cobra.Command, _ []string) error {
- storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
- walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
-
- var gasReceivers []util.Uint160
- for _, walletAddress := range walletAddresses {
- addr, err := address.StringToUint160(walletAddress)
- if err != nil {
- return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
- }
-
- gasReceivers = append(gasReceivers, addr)
- }
- for _, storageWalletPath := range storageWalletPaths {
- w, err := wallet.NewWalletFromFile(storageWalletPath)
- if err != nil {
- return fmt.Errorf("can't create wallet: %w", err)
- }
-
- gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
- }
- return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
+ return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
},
}
GenerateAlphabetCmd = &cobra.Command{
@@ -75,10 +50,10 @@ var (
func initRefillGasCmd() {
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
- RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
+ RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
+ RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
- RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
+ RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
}
func initGenerateStorageCmd() {
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
index 6499ace5f..1ca246f9f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
@@ -3,6 +3,10 @@ package helper
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -13,6 +17,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -24,88 +29,44 @@ type LocalActor struct {
rpcInvoker invoker.RPCInvoke
}
-type AlphabetWallets struct {
- Label string
- Path string
-}
-
-func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
- w, err := GetAlphabetWallets(v, a.Path)
- if err != nil {
- return nil, err
- }
-
- var accounts []*wallet.Account
- for _, wall := range w {
- acc, err := GetWalletAccount(wall, a.Label)
- if err != nil {
- return nil, err
- }
- accounts = append(accounts, acc)
- }
- return accounts, nil
-}
-
-type RegularWallets struct{ Path string }
-
-func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
- w, err := getRegularWallet(r.Path)
- if err != nil {
- return nil, err
- }
-
- return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
-}
-
// NewLocalActor creates a LocalActor with accounts from the provided wallets.
// If no wallets are provided, the actor is created with a dummy account and is
// suitable only for read operations.
-//
-// If wallets are provided, the contract client will use accounts with accName name from these wallets.
-// To determine which account name should be used in a contract client, refer to how the contract
-// verifies the transaction signature.
-func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) {
+func NewLocalActor(cmd *cobra.Command, c actor.RPCActor) (*LocalActor, error) {
+ walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
var act *actor.Actor
var accounts []*wallet.Account
- var signers []actor.SignerAccount
-
- if alphabet != nil {
- account, err := alphabet.GetAccount(viper.GetViper())
+ if walletDir == "" {
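+ // No alphabet wallet directory is configured: fall back to a freshly
+ // generated dummy account, which is enough for read-only calls.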
+ account, err := wallet.NewAccount()
+ commonCmd.ExitOnErr(cmd, "unable to create dummy account: %w", err)
+ act, err = actor.New(c, []actor.SignerAccount{{
+ Signer: transaction.Signer{
+ Account: account.Contract.ScriptHash(),
+ Scopes: transaction.Global,
+ },
+ Account: account,
+ }})
if err != nil {
return nil, err
}
+ } else {
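+ // Sign with the committee account of every alphabet wallet; the first
+ // account acts as the transaction sender.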
+ wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
+ commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
- accounts = append(accounts, account...)
- signers = append(signers, actor.SignerAccount{
+ for _, w := range wallets {
+ acc, err := GetWalletAccount(w, constants.CommitteeAccountName)
+ commonCmd.ExitOnErr(cmd, "can't find committee account: %w", err)
+ accounts = append(accounts, acc)
+ }
+ act, err = actor.New(c, []actor.SignerAccount{{
Signer: transaction.Signer{
- Account: account[0].Contract.ScriptHash(),
+ Account: accounts[0].Contract.ScriptHash(),
Scopes: transaction.Global,
},
- Account: account[0],
- })
- }
-
- for _, w := range regularWallets {
- if w == nil {
- continue
- }
- account, err := w.GetAccount()
+ Account: accounts[0],
+ }})
if err != nil {
return nil, err
}
-
- accounts = append(accounts, account...)
- signers = append(signers, actor.SignerAccount{
- Signer: transaction.Signer{
- Account: account[0].Contract.ScriptHash(),
- Scopes: transaction.Global,
- },
- Account: account[0],
- })
- }
-
- act, err := actor.New(c, signers)
- if err != nil {
- return nil, err
}
return &LocalActor{
neoActor: act,
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
index 64d1c6393..2011301d1 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
@@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
}
if method != constants.UpdateMethodName || err == nil && !found {
- h, found, err = getFrostfsIDAdmin(viper.GetViper())
+ h, found, err = GetFrostfsIDAdmin(viper.GetViper())
}
if err != nil {
return nil, err
@@ -166,6 +166,5 @@ func DeployNNS(c *InitializeContext, method string) error {
return fmt.Errorf("can't send deploy transaction: %w", err)
}
- c.Command.Println("NNS hash:", invokeHash.StringLE())
return c.AwaitTx()
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
index fce2dfb74..f29042b82 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
@@ -11,7 +11,7 @@ import (
const frostfsIDAdminConfigKey = "frostfsid.admin"
-func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
+func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
admin := v.GetString(frostfsIDAdminConfigKey)
if admin == "" {
return util.Uint160{}, false, nil
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go
deleted file mode 100644
index 38991e962..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package helper
-
-import (
- "encoding/hex"
- "testing"
-
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/spf13/viper"
- "github.com/stretchr/testify/require"
-)
-
-func TestFrostfsIDConfig(t *testing.T) {
- pks := make([]*keys.PrivateKey, 4)
- for i := range pks {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- pks[i] = pk
- }
-
- fmts := []string{
- pks[0].GetScriptHash().StringLE(),
- address.Uint160ToString(pks[1].GetScriptHash()),
- hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
- hex.EncodeToString(pks[3].PublicKey().Bytes()),
- }
-
- for i := range fmts {
- v := viper.New()
- v.Set("frostfsid.admin", fmts[i])
-
- actual, found, err := getFrostfsIDAdmin(v)
- require.NoError(t, err)
- require.True(t, found)
- require.Equal(t, pks[i].GetScriptHash(), actual)
- }
-
- t.Run("bad key", func(t *testing.T) {
- v := viper.New()
- v.Set("frostfsid.admin", "abc")
-
- _, found, err := getFrostfsIDAdmin(v)
- require.Error(t, err)
- require.True(t, found)
- })
- t.Run("missing key", func(t *testing.T) {
- v := viper.New()
-
- _, found, err := getFrostfsIDAdmin(v)
- require.NoError(t, err)
- require.False(t, found)
- })
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
index 50b5c1ec7..961ceba53 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
@@ -6,7 +6,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -14,7 +13,9 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+ nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
}
func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
- inv := invoker.New(c, nil)
- reader := nns2.NewReader(inv, nnsHash)
- return reader.IsAvailable(name)
+ switch c.(type) {
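+ // Only the remote RPC client implements the typed NNS reader; any other
+ // Client (e.g. the in-process local one) falls back to a raw invocation.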
+ case *rpcclient.Client:
+ inv := invoker.New(c, nil)
+ reader := nns2.NewReader(inv, nnsHash)
+ return reader.IsAvailable(name)
+ default:
+ b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
+ if err != nil {
+ return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
+ }
+
+ return b, nil
+ }
}
func CheckNotaryEnabled(c Client) error {
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
index da5ffedae..b5b6adf05 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
@@ -13,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -22,7 +21,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
@@ -30,6 +28,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -135,12 +134,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex
return nil, err
}
- accounts, err := getSingleAccounts(wallets)
+ accounts, err := createWalletAccounts(wallets)
if err != nil {
return nil, err
}
- cliCtx, err := defaultClientContext(c, committeeAcc)
+ cliCtx, err := DefaultClientContext(c, committeeAcc)
if err != nil {
return nil, fmt.Errorf("client context: %w", err)
}
@@ -192,7 +191,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet)
}
c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
} else {
- c, err = NewRemoteClient(v)
+ c, err = GetN3Client(v)
}
if err != nil {
return nil, fmt.Errorf("can't create N3 client: %w", err)
@@ -212,7 +211,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
return ctrPath, nil
}
-func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
+func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
accounts := make([]*wallet.Account, len(wallets))
for i, w := range wallets {
acc, err := GetWalletAccount(w, constants.SingleAccountName)
@@ -376,7 +375,9 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen
}
act, err = actor.New(c.Client, signers)
} else {
- assert.False(withConsensus, "BUG: should never happen")
+ if withConsensus {
+ panic("BUG: should never happen")
+ }
act, err = c.CommitteeAct, nil
}
if err != nil {
@@ -410,9 +411,11 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp
func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error {
version, err := c.Client.GetVersion()
- // error appears only if client
- // has not been initialized
- assert.NoError(err)
+ if err != nil {
+ // error appears only if client
+ // has not been initialized
+ panic(err)
+ }
network := version.Protocol.Network
// Use parameter context to avoid dealing with signature order.
@@ -444,12 +447,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin
for i := range tx.Signers {
if tx.Signers[i].Account == h {
- assert.True(i <= len(tx.Scripts), "BUG: invalid signing order")
if i < len(tx.Scripts) {
tx.Scripts[i] = *w
- }
- if i == len(tx.Scripts) {
+ } else if i == len(tx.Scripts) {
tx.Scripts = append(tx.Scripts, *w)
+ } else {
+ panic("BUG: invalid signing order")
}
return nil
}
@@ -507,7 +510,9 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
- assert.NoError(bw.Err)
+ if bw.Err != nil {
+ panic(bw.Err)
+ }
return bw.Bytes(), false, nil
}
@@ -519,8 +524,12 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
}
func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
- avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone))
- return !avail, err
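+ // `isAvailable` faults unless the root zone is registered, so a HALT
+ // state implies the root exists.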
+ res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
+ if err != nil {
+ return false, err
+ }
+
+ return res.State == vmstate.Halt.String(), nil
}
func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool {
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
index 46611c177..375fa84d7 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
@@ -8,9 +8,7 @@ import (
"sort"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/config"
"github.com/nspcc-dev/neo-go/pkg/core"
@@ -49,7 +47,7 @@ type LocalClient struct {
}
func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) {
- cfg, err := config.LoadFile(v.GetString(commonflags.ProtoConfigPath))
+ cfg, err := config.LoadFile(v.GetString(constants.ProtoConfigPath))
if err != nil {
return nil, err
}
@@ -59,59 +57,17 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
return nil, err
}
- go bc.Run()
-
- accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets)
- if err != nil {
- return nil, err
- }
-
- if cmd.Name() != "init" {
- if err := restoreDump(bc, dumpPath); err != nil {
- return nil, fmt.Errorf("restore dump: %w", err)
- }
- }
-
- return &LocalClient{
- bc: bc,
- dumpPath: dumpPath,
- accounts: accounts,
- }, nil
-}
-
-func restoreDump(bc *core.Blockchain, dumpPath string) error {
- f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
- if err != nil {
- return fmt.Errorf("can't open local dump: %w", err)
- }
- defer f.Close()
-
- r := io.NewBinReaderFromIO(f)
-
- var skip uint32
- if bc.BlockHeight() != 0 {
- skip = bc.BlockHeight() + 1
- }
-
- count := r.ReadU32LE() - skip
- if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
- return err
- }
- return nil
-}
-
-func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
+ m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
accounts := make([]*wallet.Account, len(wallets))
for i := range accounts {
- acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
+ accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName)
if err != nil {
return nil, err
}
- accounts[i] = acc
}
indexMap := make(map[string]int)
- for i, pub := range cfg.StandbyCommittee {
+ for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
indexMap[pub] = i
}
@@ -120,12 +76,37 @@ func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet
pj := accounts[j].PrivateKey().PublicKey().Bytes()
return indexMap[string(pi)] < indexMap[string(pj)]
})
- sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
+ sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
})
- m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
- return accounts[:m], nil
+ go bc.Run()
+
+ if cmd.Name() != "init" {
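+ // Any command other than `init` expects an existing dump and replays
+ // only the blocks above the current chain height.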
+ f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
+ if err != nil {
+ return nil, fmt.Errorf("can't open local dump: %w", err)
+ }
+ defer f.Close()
+
+ r := io.NewBinReaderFromIO(f)
+
+ var skip uint32
+ if bc.BlockHeight() != 0 {
+ skip = bc.BlockHeight() + 1
+ }
+
+ count := r.ReadU32LE() - skip
+ if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
+ return nil, fmt.Errorf("can't restore local dump: %w", err)
+ }
+ }
+
+ return &LocalClient{
+ bc: bc,
+ dumpPath: dumpPath,
+ accounts: accounts[:m],
+ }, nil
}
func (l *LocalClient) GetBlockCount() (uint32, error) {
@@ -146,6 +127,11 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
return &a, nil
}
+func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
// InvokeFunction is implemented via `InvokeScript`.
func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
var err error
@@ -309,7 +295,13 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer)
}
func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
- tx = tx.Copy()
+ // We need to test that transaction was formed correctly to catch as many errors as we can.
+ bs := tx.Bytes()
+ _, err := transaction.NewTransactionFromBytes(bs)
+ if err != nil {
+ return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
+ }
+
l.transactions = append(l.transactions, tx)
return tx.Hash(), nil
}
@@ -317,7 +309,9 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint
func (l *LocalClient) putTransactions() error {
// 1. Prepare new block.
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
- assert.NoError(err)
+ if err != nil {
+ panic(err)
+ }
defer func() { l.transactions = l.transactions[:0] }()
b := &block.Block{
@@ -358,7 +352,9 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s
w := io.NewBufBinWriter()
emit.Array(w.BinWriter, parameters...)
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
- assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
+ if w.Err != nil {
+ panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
+ }
return c.InvokeScript(w.Bytes(), signers)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
index 3f3a66cb6..e62a21b3f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
@@ -24,10 +25,15 @@ import (
// Client represents N3 client interface capable of test-invoking scripts
// and sending signed transactions to chain.
type Client interface {
- actor.RPCActor
+ invoker.RPCInvoke
+ GetBlockCount() (uint32, error)
GetNativeContracts() ([]state.Contract, error)
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
+ GetVersion() (*result.Version, error)
+ SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
+ GetCommittee() (keys.PublicKeys, error)
+ CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
}
type HashVUBPair struct {
@@ -42,7 +48,7 @@ type ClientContext struct {
SentTxs []HashVUBPair
}
-func NewRemoteClient(v *viper.Viper) (Client, error) {
+func GetN3Client(v *viper.Viper) (Client, error) {
// number of connections opened by the neo-go client per host
const (
@@ -82,14 +88,8 @@ func NewRemoteClient(v *viper.Viper) (Client, error) {
return c, nil
}
-func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
- commAct, err := actor.New(c, []actor.SignerAccount{{
- Signer: transaction.Signer{
- Account: committeeAcc.Contract.ScriptHash(),
- Scopes: transaction.Global,
- },
- Account: committeeAcc,
- }})
+func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
+ commAct, err := NewActor(c, committeeAcc)
if err != nil {
return nil, err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
index 20abaff0a..7a778f8c3 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
@@ -3,7 +3,6 @@ package helper
import (
"errors"
"fmt"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -73,17 +72,13 @@ func InvalidConfigValueErr(key string) error {
return fmt.Errorf("invalid %s config value from netmap contract", key)
}
-func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error {
- if countEpoch <= 0 {
- return errors.New("number of epochs cannot be less than 1")
- }
-
+func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160) error {
curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch"))
if err != nil {
return errors.New("can't fetch current epoch from the netmap contract")
}
- newEpoch := curr + countEpoch
+ newEpoch := curr + 1
wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch)
// In NeoFS this is done via Notary contract. Here, however, we can form the
@@ -119,8 +114,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
return err
}
for k, v := range m {
- if slices.Contains(NetmapConfigKeys, k) {
- md[k] = v
+ for _, key := range NetmapConfigKeys {
+ if k == key {
+ md[k] = v
+ break
+ }
}
}
return nil
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
index be6b2c6dd..8c6b90539 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
@@ -14,36 +14,16 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
- "github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper"
)
-func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
- w, err := wallet.NewWalletFromFile(walletPath)
- if err != nil {
- return nil, err
- }
-
- password, err := input.ReadPassword("Enter password for wallet:")
- if err != nil {
- return nil, fmt.Errorf("can't fetch password: %w", err)
- }
-
- for i := range w.Accounts {
- if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
- err = fmt.Errorf("can't unlock wallet: %w", err)
- break
- }
- }
-
- return w, err
-}
-
func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
wallets, err := openAlphabetWallets(v, walletDir)
if err != nil {
@@ -73,7 +53,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
if errors.Is(err, os.ErrNotExist) {
err = nil
} else {
- err = fmt.Errorf("can't open alphabet wallet: %w", err)
+ err = fmt.Errorf("can't open wallet: %w", err)
}
break
}
@@ -107,6 +87,16 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
return wallets, nil
}
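+// NewActor returns an actor that signs with the committee account using the
+// Global witness scope.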
+func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
+ return actor.New(c, []actor.SignerAccount{{
+ Signer: transaction.Signer{
+ Account: committeeAcc.Contract.ScriptHash(),
+ Scopes: transaction.Global,
+ },
+ Account: committeeAcc,
+ }})
+}
+
func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
index 176356378..e127ca545 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -112,7 +111,9 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b
emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
- assert.NoError(w.Err, "can't wrap register script")
+ if w.Err != nil {
+ panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
+ }
}
func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
index 7b7597d91..4c6607f9a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
@@ -1,18 +1,21 @@
package initialize
import (
+ "errors"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -27,8 +30,7 @@ const (
)
func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
- reader := neo.NewReader(c.ReadOnlyInvoker)
- regPrice, err := reader.GetRegisterPrice()
+ regPrice, err := getCandidateRegisterPrice(c)
if err != nil {
return fmt.Errorf("can't fetch registration price: %w", err)
}
@@ -40,7 +42,9 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
emit.Opcodes(w.BinWriter, opcode.ASSERT)
}
emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(fmt.Sprintf("BUG: %v", w.Err))
+ }
signers := []actor.SignerAccount{{
Signer: c.GetSigner(false, c.CommitteeAcc),
@@ -112,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error {
func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
neoHash := neo.Hash
- ok, err := transferNEOFinished(c)
+ ok, err := transferNEOFinished(c, neoHash)
if ok || err != nil {
return err
}
@@ -135,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
return c.AwaitTx()
}
-func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
- r := neo.NewReader(c.ReadOnlyInvoker)
+func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
+ r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
}
+
+var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
+
+func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
+ switch c.Client.(type) {
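+ // A remote RPC client can use the typed NEO contract reader; the local
+ // client must issue a raw `getRegisterPrice` invocation instead.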
+ case *rpcclient.Client:
+ inv := invoker.New(c.Client, nil)
+ reader := neo.NewReader(inv)
+ return reader.GetRegisterPrice()
+ default:
+ neoHash := neo.Hash
+ res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
+ if err != nil {
+ return 0, err
+ }
+ if len(res.Stack) == 0 {
+ return 0, errGetPriceInvalid
+ }
+ bi, err := res.Stack[0].TryInteger()
+ if err != nil || !bi.IsInt64() {
+ return 0, errGetPriceInvalid
+ }
+ return bi.Int64(), nil
+ }
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
index 05bc83a8b..a6815ee13 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
@@ -1,8 +1,6 @@
package initialize
import (
- "fmt"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -31,14 +29,10 @@ func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error {
callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs)
if err := c.SendCommitteeTx(w.Bytes(), false); err != nil {
- return fmt.Errorf("send committee transaction: %w", err)
+ return err
}
- err := c.AwaitTx()
- if err != nil {
- err = fmt.Errorf("await committee transaction: %w", err)
- }
- return err
+ return c.AwaitTx()
}
func setRolesFinished(c *helper.InitializeContext) (bool, error) {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
index 9bc51c055..74f5d3e88 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
@@ -62,7 +62,7 @@ func testInitialize(t *testing.T, committeeSize int) {
v := viper.GetViper()
require.NoError(t, generateTestData(testdataDir, committeeSize))
- v.Set(commonflags.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
+ v.Set(constants.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
// Set to the path or remove the next statement to download from the network.
require.NoError(t, Cmd.Flags().Set(commonflags.ContractsInitFlag, contractsPath))
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
index bb684b3a9..b7102fa13 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
@@ -3,7 +3,6 @@ package initialize
import (
"fmt"
"math/big"
- "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -22,16 +21,17 @@ import (
)
const (
+ gasInitialTotalSupply = 30000000 * native.GASFactor
// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor
+ // alphabetGasRatio defines the threshold, in whole GAS, below which the
+ // balance of an alphabet node is considered not yet replenished. The value
+ // was chosen empirically.
+ alphabetGasRatio = 5
)
-func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
- return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
-}
-
func transferFunds(c *helper.InitializeContext) error {
ok, err := transferFundsFinished(c)
if ok || err != nil {
@@ -41,11 +41,6 @@ func transferFunds(c *helper.InitializeContext) error {
return err
}
- version, err := c.Client.GetVersion()
- if err != nil {
- return err
- }
-
var transfers []transferTarget
for _, acc := range c.Accounts {
to := acc.Contract.ScriptHash()
@@ -63,7 +58,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{
Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(),
- Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
+ Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2,
},
transferTarget{
Token: neo.Hash,
@@ -84,26 +79,12 @@ func transferFunds(c *helper.InitializeContext) error {
return c.AwaitTx()
}
-// transferFundsFinished checks balances of accounts we transfer GAS to.
-// The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
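+ // The stage counts as finished once the first alphabet account holds
+ // more than alphabetGasRatio GAS.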
+ acc := c.Accounts[0]
+
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
- res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
- if err != nil {
- return false, err
- }
-
- version, err := c.Client.GetVersion()
- if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
- return false, err
- }
-
- res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
- if err != nil {
- return false, err
- }
-
- return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
+ res, err := r.BalanceOf(acc.Contract.ScriptHash())
+ return res.Cmp(big.NewInt(alphabetGasRatio*native.GASFactor)) == 1, err
}
func transferGASToProxy(c *helper.InitializeContext) error {
@@ -163,17 +144,5 @@ func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients
if err != nil {
return nil, fmt.Errorf("can't create actor: %w", err)
}
- tx, err := act.MakeRun(w.Bytes())
- if err != nil {
- sum := make(map[util.Uint160]int64)
- for _, recipient := range recipients {
- sum[recipient.Token] += recipient.Amount
- }
- detail := make([]string, 0, len(sum))
- for _, value := range sum {
- detail = append(detail, fmt.Sprintf("amount=%v", value))
- }
- err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err)
- }
- return tx, err
+ return act.MakeRun(w.Bytes())
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
index 50f14e728..b7885c512 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
@@ -2,6 +2,7 @@ package initialize
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -31,7 +32,7 @@ var Cmd = &cobra.Command{
_ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag))
_ = viper.BindPFlag(commonflags.ContainerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag))
_ = viper.BindPFlag(commonflags.WithdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag))
- _ = viper.BindPFlag(commonflags.ProtoConfigPath, cmd.Flags().Lookup(commonflags.ProtoConfigPath))
+ _ = viper.BindPFlag(constants.ProtoConfigPath, cmd.Flags().Lookup(constants.ProtoConfigPath))
},
RunE: initializeSideChainCmd,
}
@@ -47,7 +48,7 @@ func initInitCmd() {
// Defaults are taken from neo-preodolenie.
Cmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee")
Cmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee")
- Cmd.Flags().String(commonflags.ProtoConfigPath, "", "Path to the consensus node configuration")
+ Cmd.Flags().String(constants.ProtoConfigPath, "", "Path to the consensus node configuration")
Cmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
Cmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index 94223dbd0..df9a03fd1 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -12,8 +12,6 @@ import (
"github.com/spf13/viper"
)
-const deltaFlag = "delta"
-
func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
@@ -32,8 +30,7 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- delta, _ := cmd.Flags().GetInt64(deltaFlag)
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil {
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
index a689e0ec1..d8471bb9a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
@@ -13,7 +13,7 @@ import (
)
func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 291482e0f..31fda860e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -12,6 +12,7 @@ var (
Short: "List netmap candidates nodes",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
},
Run: listNetmapCandidatesNodes,
}
@@ -34,7 +35,6 @@ func initForceNewEpochCmd() {
ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
- ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch")
}
func init() {
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
deleted file mode 100644
index 14f6eb390..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package nns
-
-import (
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func initRegisterCmd() {
- Cmd.AddCommand(registerCmd)
- registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email")
- registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter")
- registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
- registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
- registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
- registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
-}
-
-func registerDomain(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- email, _ := cmd.Flags().GetString(nnsEmailFlag)
- refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag)
- retry, _ := cmd.Flags().GetInt64(nnsRetryFlag)
- expire, _ := cmd.Flags().GetInt64(nnsExpireFlag)
- ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag)
-
- h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh),
- big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl))
- commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
- cmd.Println("Domain registered successfully")
-}
-
-func initDeleteCmd() {
- Cmd.AddCommand(deleteCmd)
- deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
-}
-
-func deleteDomain(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- h, vub, err := c.DeleteDomain(name)
-
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
- cmd.Println("Domain deleted successfully")
-}
-
-func initSetAdminCmd() {
- Cmd.AddCommand(setAdminCmd)
- setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
- setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
- _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)
-
- _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
-}
-
-func setAdmin(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
- commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
- h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())
-
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
- cmd.Println("Set admin successfully")
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
index e49f62256..0eaa5ac58 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
@@ -1,67 +1,25 @@
package nns
import (
- "errors"
-
client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
-func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
+func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, util.Uint160) {
v := viper.GetViper()
- c, err := helper.NewRemoteClient(v)
+ c, err := helper.GetN3Client(v)
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
- alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag))
- walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
- adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))
-
- var (
- alphabet *helper.AlphabetWallets
- regularWallets []*helper.RegularWallets
- )
-
- if alphabetWalletPath != "" {
- alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
- }
-
- if walletPath != "" {
- regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
- }
-
- if adminWalletPath != "" {
- regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
- }
-
- if alphabet == nil && regularWallets == nil {
- commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
- }
-
- ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
+ ac, err := helper.NewLocalActor(cmd, c)
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
r := management.NewReader(ac.Invoker)
nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
- return client.New(ac, nnsCs.Hash), ac
-}
-
-func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
- c, err := helper.NewRemoteClient(viper.GetViper())
- commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
- inv := invoker.New(c, nil)
- r := management.NewReader(inv)
- nnsCs, err := helper.GetContractByID(r, 1)
- commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-
- return client.NewReader(inv, nnsCs.Hash), inv
+ return client.New(ac, nnsCs.Hash), ac, nnsCs.Hash
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
index 9cb47356f..0e217eb61 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/spf13/cobra"
)
@@ -19,7 +20,6 @@ func initAddRecordCmd() {
addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
- addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)
@@ -29,6 +29,7 @@ func initAddRecordCmd() {
func initGetRecordsCmd() {
Cmd.AddCommand(getRecordsCmd)
getRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ getRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
getRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
getRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
@@ -41,28 +42,13 @@ func initDelRecordsCmd() {
delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
- delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
}
-func initDelRecordCmd() {
- Cmd.AddCommand(delRecordCmd)
- delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
- delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
- delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
- _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)
- _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag)
-}
-
func addRecord(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
+ c, actor, _ := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
@@ -78,16 +64,16 @@ func addRecord(cmd *cobra.Command, _ []string) {
}
func getRecords(cmd *cobra.Command, _ []string) {
- c, inv := nnsReader(cmd)
+ c, act, hash := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
if recordType == "" {
- sid, r, err := c.GetAllRecords(name)
+ sid, r, err := unwrap.SessionIterator(act.Invoker.Call(hash, "getAllRecords", name))
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
defer func() {
- _ = inv.TerminateSession(sid)
+ _ = act.Invoker.TerminateSession(sid)
}()
- items, err := inv.TraverseIterator(sid, &r, 0)
+ items, err := act.Invoker.TraverseIterator(sid, &r, 0)
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
for len(items) != 0 {
for j := range items {
@@ -98,7 +84,7 @@ func getRecords(cmd *cobra.Command, _ []string) {
recordTypeToString(nns.RecordType(rs[1].Value().(*big.Int).Int64())),
string(bs))
}
- items, err = inv.TraverseIterator(sid, &r, 0)
+ items, err = act.Invoker.TraverseIterator(sid, &r, 0)
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
}
} else {
@@ -115,7 +101,7 @@ func getRecords(cmd *cobra.Command, _ []string) {
}
func delRecords(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
+ c, actor, _ := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
typ, err := getRecordType(recordType)
@@ -129,22 +115,6 @@ func delRecords(cmd *cobra.Command, _ []string) {
cmd.Println("Records removed successfully")
}
-func delRecord(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
- recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
- typ, err := getRecordType(recordType)
- commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
- h, vub, err := c.DeleteRecord(name, typ, data)
- commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "delete records error: %w", err)
- cmd.Println("Record removed successfully")
-}
-
func getRecordType(recordType string) (*big.Int, error) {
switch strings.ToUpper(recordType) {
case "A":
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/register.go b/cmd/frostfs-adm/internal/modules/morph/nns/register.go
new file mode 100644
index 000000000..d05d9f171
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/register.go
@@ -0,0 +1,44 @@
+package nns
+
+import (
+ "math/big"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "github.com/spf13/cobra"
+)
+
+func initRegisterCmd() {
+ Cmd.AddCommand(registerCmd)
+ registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+ registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email")
+ registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter")
+ registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
+ registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
+ registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
+
+ _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
+}
+
+func registerDomain(cmd *cobra.Command, _ []string) {
+ c, actor, _ := getRPCClient(cmd)
+
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ email, _ := cmd.Flags().GetString(nnsEmailFlag)
+ refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag)
+ retry, _ := cmd.Flags().GetInt64(nnsRetryFlag)
+ expire, _ := cmd.Flags().GetInt64(nnsExpireFlag)
+ ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag)
+
+ h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh),
+ big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl))
+ commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err)
+
+ cmd.Println("Waiting for transaction to persist...")
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
+ cmd.Println("Domain registered successfully")
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
index 53bd943f0..b13092240 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
@@ -14,7 +14,7 @@ func initRenewCmd() {
}
func renewDomain(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
+ c, actor, _ := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
h, vub, err := c.Renew(name)
commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
index bb84933c6..e528e4b7b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
@@ -39,20 +39,9 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: registerDomain,
}
- deleteCmd = &cobra.Command{
- Use: "delete",
- Short: "Delete a domain by name",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- },
- Run: deleteDomain,
- }
renewCmd = &cobra.Command{
Use: "renew",
Short: "Increases domain expiration date",
@@ -77,7 +66,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: addRecord,
}
@@ -95,42 +83,17 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: delRecords,
}
- delRecordCmd = &cobra.Command{
- Use: "delete-record",
- Short: "Removes domain record with the specified type and data",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- },
- Run: delRecord,
- }
- setAdminCmd = &cobra.Command{
- Use: "set-admin",
- Short: "Sets admin for domain",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
- },
- Run: setAdmin,
- }
)
func init() {
initTokensCmd()
initRegisterCmd()
- initDeleteCmd()
initRenewCmd()
initUpdateCmd()
initAddRecordCmd()
initGetRecordsCmd()
initDelRecordsCmd()
- initDelRecordCmd()
- initSetAdminCmd()
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
index 4ccbb1677..6e8ffb40a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
@@ -1,65 +1,24 @@
package nns
import (
- "math/big"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/spf13/cobra"
)
-const (
- verboseDesc = "Include additional information about CNAME record."
-)
-
func initTokensCmd() {
Cmd.AddCommand(tokensCmd)
tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc)
+ tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func listTokens(cmd *cobra.Command, _ []string) {
- c, _ := nnsReader(cmd)
+ c, _, _ := getRPCClient(cmd)
it, err := c.Tokens()
commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err)
for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) {
for _, token := range toks {
- output := string(token)
- if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose {
- cname, err := getCnameRecord(c, token)
- commonCmd.ExitOnErr(cmd, "", err)
- if cname != "" {
- output += " (CNAME: " + cname + ")"
- }
- }
- cmd.Println(output)
+ cmd.Println(string(token))
}
}
}
-
-func getCnameRecord(c *client.ContractReader, token []byte) (string, error) {
- items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME)))
-
- // GetRecords returns the error "not an array" if the domain does not contain records.
- if err != nil && strings.Contains(err.Error(), "not an array") {
- return "", nil
- }
-
- if err != nil {
- return "", err
- }
-
- if len(items) == 0 {
- return "", nil
- }
-
- record, err := items[0].TryBytes()
- if err != nil {
- return "", err
- }
-
- return string(record), nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/update.go b/cmd/frostfs-adm/internal/modules/morph/nns/update.go
index c6d77ead6..3437316e3 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/update.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/update.go
@@ -30,7 +30,7 @@ func initUpdateCmd() {
}
func updateSOA(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
+ c, actor, _ := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
email, _ := cmd.Flags().GetString(nnsEmailFlag)
diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
index e47451e0c..0a19102ba 100644
--- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go
+++ b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
@@ -53,7 +53,7 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes())
}
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil {
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go
index 3435926c0..9b213da4e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go
+++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math/big"
+ "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
- addr, _ := cmd.Flags().GetString(walletAccountFlag)
- if addr != "" {
+ if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
- prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
+ prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@@ -73,16 +73,23 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return err
}
- till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
- if till <= 0 {
- return errInvalidNotaryDepositLifetime
+ till := int64(defaultNotaryDepositLifetime)
+ tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
+ if err != nil {
+ return err
+ }
+ if tillStr != "" {
+ till, err = strconv.ParseInt(tillStr, 10, 64)
+ if err != nil || till <= 0 {
+ return errInvalidNotaryDepositLifetime
+ }
}
return transferGas(cmd, acc, accHash, gasAmount, till)
}
func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return err
}
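The `--till` flag becomes a string here so that an empty value can fall back to the default lifetime; only a non-empty value is parsed and validated. The same logic isolated into a testable helper; the names and the default value are assumptions for the demo.

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

const defaultNotaryDepositLifetime = 5760 // blocks; value assumed for the demo

// parseDepositLifetime mirrors the branch above: empty input keeps the
// default, anything else must parse as a positive base-10 int64.
func parseDepositLifetime(s string) (int64, error) {
	if s == "" {
		return defaultNotaryDepositLifetime, nil
	}
	till, err := strconv.ParseInt(s, 10, 64)
	if err != nil || till <= 0 {
		return 0, errors.New("invalid notary deposit lifetime")
	}
	return till, nil
}

func main() {
	fmt.Println(parseDepositLifetime(""))    // 5760 <nil>
	fmt.Println(parseDepositLifetime("100")) // 100 <nil>
}
```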
diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/root.go b/cmd/frostfs-adm/internal/modules/morph/notary/root.go
index d7be2e503..497ff8ea1 100644
--- a/cmd/frostfs-adm/internal/modules/morph/notary/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/notary/root.go
@@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
- DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
+ DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
}
func init() {
diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
index f2932e87c..36547e22c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
@@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
}
func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
inv := invoker.New(c, nil)
@@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
- _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
- _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
- _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
+ _, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
+ _, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
+ _, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
_ = tw.Flush()
cmd.Print(buf.String())
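`fmt.Appendf` (Go 1.19+) and `[]byte(fmt.Sprintf(...))` emit identical bytes, so this change is behavior-preserving. For anyone unfamiliar with the tabwriter idiom used here, a runnable round-trip with sample values:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	buf := bytes.NewBuffer(nil)
	// minwidth 0, tabwidth 2, padding 2, pad with spaces: columns align on '\t'.
	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
	_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", 30)))
	_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", 1000)))
	_ = tw.Flush() // Flush is required before reading the buffer back
	_, _ = os.Stdout.WriteString(buf.String())
}
```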
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
index 24cda45a6..cb575b657 100644
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
@@ -20,32 +20,23 @@ const (
accountAddressFlag = "account"
)
-func parseAddresses(cmd *cobra.Command) []util.Uint160 {
- var addrs []util.Uint160
-
- accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
- for _, acc := range accs {
- addr, err := address.StringToUint160(acc)
- commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
-
- addrs = append(addrs, addr)
- }
- return addrs
-}
-
func addProxyAccount(cmd *cobra.Command, _ []string) {
- addrs := parseAddresses(cmd)
- err := processAccount(cmd, addrs, "addAccount")
+ acc, _ := cmd.Flags().GetString(accountAddressFlag)
+ addr, err := address.StringToUint160(acc)
+ commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
+ err = processAccount(cmd, addr, "addAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func removeProxyAccount(cmd *cobra.Command, _ []string) {
- addrs := parseAddresses(cmd)
- err := processAccount(cmd, addrs, "removeAccount")
+ acc, _ := cmd.Flags().GetString(accountAddressFlag)
+ addr, err := address.StringToUint160(acc)
+ commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
+ err = processAccount(cmd, addr, "removeAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
-func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
+func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
@@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err
}
bw := io.NewBufBinWriter()
- for _, addr := range addrs {
- emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
- }
+ emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err
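With the multi-address loop gone, each transaction carries exactly one `addAccount`/`removeAccount` call. A minimal, self-contained sketch of how such an invocation script is assembled with neo-go; the zero hashes are placeholders, not real contract or account values.

```go
package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
)

// buildScript emits a single AppCall, as processAccount does above.
func buildScript(proxyHash, addr util.Uint160, method string) ([]byte, error) {
	bw := io.NewBufBinWriter()
	emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
	if bw.Err != nil {
		return nil, bw.Err
	}
	return bw.Bytes(), nil
}

func main() {
	script, err := buildScript(util.Uint160{}, util.Uint160{}, "addAccount")
	fmt.Println(len(script), err)
}
```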
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
index ad89af2b5..082bc57d1 100644
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
@@ -29,16 +29,12 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
- _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
- AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
- _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
- RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
}
func init() {
diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go
index cc8225c7a..defd898c8 100644
--- a/cmd/frostfs-adm/internal/modules/root.go
+++ b/cmd/frostfs-adm/internal/modules/root.go
@@ -5,9 +5,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@@ -41,8 +41,8 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
+ rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(metabase.RootCmd)
- rootCmd.AddCommand(maintenance.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go
new file mode 100644
index 000000000..77183fb49
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/storagecfg/config.go
@@ -0,0 +1,137 @@
+package storagecfg
+
+const configTemplate = `logger:
+ level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
+
+node:
+ wallet:
+ path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
+ address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
+ password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
+ addresses: # list of addresses announced by Storage node in the Network map
+ - {{ .AnnouncedAddress }}
+ attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
+ relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
+
+grpc:
+ num: 1 # total number of listener endpoints
+ 0:
+ endpoint: {{ .Endpoint }} # endpoint for gRPC server
+ tls:{{if .TLSCert}}
+ enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
+ certificate: {{ .TLSCert }} # path to TLS certificate
+ key: {{ .TLSKey }} # path to TLS key
+ {{- else }}
+ enabled: false # disable TLS for a gRPC connection
+ {{- end}}
+
+control:
+ authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
+ {{- range .AuthorizedKeys }}
+ - {{.}}{{end}}
+ grpc:
+ endpoint: {{.ControlEndpoint}} # endpoint the Control Service listens on
+
+morph:
+ dial_timeout: 20s # timeout for side chain NEO RPC client connection
+ cache_ttl: 15s # use TTL cache for side chain GET operations
+ rpc_endpoint: # side chain N3 RPC endpoints
+ {{- range .MorphRPC }}
+ - address: wss://{{.}}/ws{{end}}
+{{if not .Relay }}
+storage:
+ shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
+
+ shard:
+ default: # section with the default shard parameters
+ metabase:
+ perm: 0644 # permissions for metabase files (directories: +x for current user and group)
+
+ blobstor:
+ perm: 0644 # permissions for blobstor files (directories: +x for current user and group)
+ depth: 2 # max depth of object tree storage in FS
+ small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
+ compress: true # turn on/off Zstandard compression (level 3) of stored objects
+ compression_exclude_content_types:
+ - audio/*
+ - video/*
+
+ blobovnicza:
+ size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
+ depth: 1 # max depth of object tree storage in key-value DB
+ width: 4 # max width of object tree storage in key-value DB
+ opened_cache_capacity: 50 # maximum number of opened database files
+ opened_cache_ttl: 5m # ttl for opened database file
+ opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovniczas
+
+ gc:
+ remover_batch_size: 200 # number of objects to be removed by the garbage collector
+ remover_sleep_interval: 5m # frequency of the garbage collector invocation
+ 0:
+ mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
+
+ metabase:
+ path: {{ .MetabasePath }} # path to the metabase
+
+ blobstor:
+ path: {{ .BlobstorPath }} # path to the blobstor
+{{end}}`
+
+const (
+ neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
+ balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
+ neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
+ balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
+)
+
+var n3config = map[string]struct {
+ MorphRPC []string
+ RPC []string
+ NeoFSContract string
+ BalanceContract string
+}{
+ "testnet": {
+ MorphRPC: []string{
+ "rpc01.morph.testnet.fs.neo.org:51331",
+ "rpc02.morph.testnet.fs.neo.org:51331",
+ "rpc03.morph.testnet.fs.neo.org:51331",
+ "rpc04.morph.testnet.fs.neo.org:51331",
+ "rpc05.morph.testnet.fs.neo.org:51331",
+ "rpc06.morph.testnet.fs.neo.org:51331",
+ "rpc07.morph.testnet.fs.neo.org:51331",
+ },
+ RPC: []string{
+ "rpc01.testnet.n3.nspcc.ru:21331",
+ "rpc02.testnet.n3.nspcc.ru:21331",
+ "rpc03.testnet.n3.nspcc.ru:21331",
+ "rpc04.testnet.n3.nspcc.ru:21331",
+ "rpc05.testnet.n3.nspcc.ru:21331",
+ "rpc06.testnet.n3.nspcc.ru:21331",
+ "rpc07.testnet.n3.nspcc.ru:21331",
+ },
+ NeoFSContract: neofsTestnetAddress,
+ BalanceContract: balanceTestnetAddress,
+ },
+ "mainnet": {
+ MorphRPC: []string{
+ "rpc1.morph.fs.neo.org:40341",
+ "rpc2.morph.fs.neo.org:40341",
+ "rpc3.morph.fs.neo.org:40341",
+ "rpc4.morph.fs.neo.org:40341",
+ "rpc5.morph.fs.neo.org:40341",
+ "rpc6.morph.fs.neo.org:40341",
+ "rpc7.morph.fs.neo.org:40341",
+ },
+ RPC: []string{
+ "rpc1.n3.nspcc.ru:10331",
+ "rpc2.n3.nspcc.ru:10331",
+ "rpc3.n3.nspcc.ru:10331",
+ "rpc4.n3.nspcc.ru:10331",
+ "rpc5.n3.nspcc.ru:10331",
+ "rpc6.n3.nspcc.ru:10331",
+ "rpc7.n3.nspcc.ru:10331",
+ },
+ NeoFSContract: neofsMainnetAddress,
+ BalanceContract: balanceMainnetAddress,
+ },
+}
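The template relies on plain `text/template` interpolation plus an `{{if not .Relay}}` gate that omits the entire storage section for relay nodes. A tiny standalone demo of that mechanic, with a made-up two-field struct:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Field interpolation plus the same conditional gate used by configTemplate.
	tmpl := template.Must(template.New("cfg").Parse(
		"endpoint: {{ .Endpoint }}\n{{if not .Relay }}storage: enabled\n{{end}}"))
	_ = tmpl.Execute(os.Stdout, struct {
		Endpoint string
		Relay    bool
	}{Endpoint: "localhost:8080", Relay: false})
	// Output:
	// endpoint: localhost:8080
	// storage: enabled
}
```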
diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go
new file mode 100644
index 000000000..127272da5
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/storagecfg/root.go
@@ -0,0 +1,433 @@
+package storagecfg
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math/rand"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+
+ netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "github.com/chzyer/readline"
+ "github.com/nspcc-dev/neo-go/cli/flags"
+ "github.com/nspcc-dev/neo-go/cli/input"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+
+ "github.com/spf13/cobra"
+)
+
+const (
+ walletFlag = "wallet"
+ accountFlag = "account"
+)
+
+const (
+ defaultControlEndpoint = "localhost:8090"
+ defaultDataEndpoint = "localhost"
+)
+
+// RootCmd is the root command of the storage config section.
+var RootCmd = &cobra.Command{
+ Use: "storage-config [-w wallet] [-a acccount] []",
+ Short: "Section for storage node configuration commands",
+ Run: storageConfig,
+}
+
+func init() {
+ fs := RootCmd.Flags()
+
+ fs.StringP(walletFlag, "w", "", "Path to wallet")
+ fs.StringP(accountFlag, "a", "", "Wallet account")
+}
+
+type config struct {
+ AnnouncedAddress string
+ AuthorizedKeys []string
+ ControlEndpoint string
+ Endpoint string
+ TLSCert string
+ TLSKey string
+ MorphRPC []string
+ Attribute struct {
+ Locode string
+ }
+ Wallet struct {
+ Path string
+ Account string
+ Password string
+ }
+ Relay bool
+ BlobstorPath string
+ MetabasePath string
+}
+
+func storageConfig(cmd *cobra.Command, args []string) {
+ outPath := getOutputPath(args)
+
+ historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
+ readline.SetHistoryPath(historyPath)
+
+ var c config
+
+ c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
+ if c.Wallet.Path == "" {
+ c.Wallet.Path = getPath("Path to the storage node wallet: ")
+ }
+
+ w, err := wallet.NewWalletFromFile(c.Wallet.Path)
+ fatalOnErr(err)
+
+ fillWalletAccount(cmd, &c, w)
+
+ accH, err := flags.ParseAddress(c.Wallet.Account)
+ fatalOnErr(err)
+
+ acc := w.GetAccount(accH)
+ if acc == nil {
+ fatalOnErr(errors.New("can't find account in wallet"))
+ }
+
+ c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
+ fatalOnErr(err)
+
+ err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
+ fatalOnErr(err)
+
+ c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
+
+ network := readNetwork(cmd)
+
+ c.MorphRPC = n3config[network].MorphRPC
+
+ depositGas(cmd, acc, network)
+
+ c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
+
+ endpoint := getDefaultEndpoint(cmd, &c)
+ c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
+ if c.Endpoint == "" {
+ c.Endpoint = endpoint
+ }
+
+ c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
+ if c.ControlEndpoint == "" {
+ c.ControlEndpoint = defaultControlEndpoint
+ }
+
+ c.TLSCert = getPath("TLS Certificate (optional): ")
+ if c.TLSCert != "" {
+ c.TLSKey = getPath("TLS Key: ")
+ }
+
+ c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
+ if !c.Relay {
+ p := getPath("Path to the storage directory (all available storage will be used): ")
+ c.BlobstorPath = filepath.Join(p, "blob")
+ c.MetabasePath = filepath.Join(p, "meta")
+ }
+
+ out := applyTemplate(c)
+ fatalOnErr(os.WriteFile(outPath, out, 0o644))
+
+ cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
+}
+
+func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
+ var addr, port string
+ for {
+ c.AnnouncedAddress = getString("Publicly announced address: ")
+ validator := netutil.Address{}
+ err := validator.FromString(c.AnnouncedAddress)
+ if err != nil {
+ cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
+ continue
+ }
+ uriAddr, err := url.Parse(validator.URIAddr())
+ if err != nil {
+ panic(fmt.Errorf("unexpected error: %w", err))
+ }
+ addr = uriAddr.Hostname()
+ port = uriAddr.Port()
+ ip, err := net.ResolveIPAddr("ip", addr)
+ if err != nil {
+ cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
+ continue
+ }
+
+ if !ip.IP.IsGlobalUnicast() {
+ cmd.Println("IP must be global unicast.")
+ continue
+ }
+ cmd.Printf("Resolved IP address: %s\n", ip.String())
+
+ _, err = strconv.ParseUint(port, 10, 16)
+ if err != nil {
+ cmd.Println("Port must be an integer.")
+ continue
+ }
+
+ break
+ }
+ return net.JoinHostPort(defaultDataEndpoint, port)
+}
+
+func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
+ c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
+ if c.Wallet.Account == "" {
+ addr := address.Uint160ToString(w.GetChangeAddress())
+ c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
+ if c.Wallet.Account == "" {
+ c.Wallet.Account = addr
+ }
+ }
+}
+
+func readNetwork(cmd *cobra.Command) string {
+ var network string
+ for {
+ network = getString("Choose network [mainnet]/testnet: ")
+ switch network {
+ case "":
+ network = "mainnet"
+ case "testnet", "mainnet":
+ default:
+ cmd.Println(`Network must be either "mainnet" or "testnet"`)
+ continue
+ }
+ break
+ }
+ return network
+}
+
+func getOutputPath(args []string) string {
+ if len(args) != 0 {
+ return args[0]
+ }
+ outPath := getPath("File to write config at [./config.yml]: ")
+ if outPath == "" {
+ outPath = "./config.yml"
+ }
+ return outPath
+}
+
+func getWalletAccount(w *wallet.Wallet, prompt string) string {
+ addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
+ for i := range w.Accounts {
+ addrs[i] = readline.PcItem(w.Accounts[i].Address)
+ }
+
+ readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
+ defer readline.SetAutoComplete(nil)
+
+ s, err := readline.Line(prompt)
+ fatalOnErr(err)
+ return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
+}
+
+func getString(prompt string) string {
+ s, err := readline.Line(prompt)
+ fatalOnErr(err)
+ if s != "" {
+ _ = readline.AddHistory(s)
+ }
+ return s
+}
+
+type filenameCompleter struct{}
+
+func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
+ prefix := string(line[:pos])
+ dir := filepath.Dir(prefix)
+ de, err := os.ReadDir(dir)
+ if err != nil {
+ return nil, 0
+ }
+
+ for i := range de {
+ name := filepath.Join(dir, de[i].Name())
+ if strings.HasPrefix(name, prefix) {
+ tail := []rune(strings.TrimPrefix(name, prefix))
+ if de[i].IsDir() {
+ tail = append(tail, filepath.Separator)
+ }
+ newLine = append(newLine, tail)
+ }
+ }
+ if pos != 0 {
+ return newLine, pos - len([]rune(dir))
+ }
+ return newLine, 0
+}
+
+func getPath(prompt string) string {
+ readline.SetAutoComplete(filenameCompleter{})
+ defer readline.SetAutoComplete(nil)
+
+ p, err := readline.Line(prompt)
+ fatalOnErr(err)
+
+ if p == "" {
+ return p
+ }
+
+ _ = readline.AddHistory(p)
+
+ abs, err := filepath.Abs(p)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
+ }
+
+ return abs
+}
+
+func getConfirmation(def bool, prompt string) bool {
+ for {
+ s, err := readline.Line(prompt)
+ fatalOnErr(err)
+
+ switch strings.ToLower(s) {
+ case "y", "yes":
+ return true
+ case "n", "no":
+ return false
+ default:
+ if len(s) == 0 {
+ return def
+ }
+ }
+ }
+}
+
+func applyTemplate(c config) []byte {
+ tmpl, err := template.New("config").Parse(configTemplate)
+ fatalOnErr(err)
+
+ b := bytes.NewBuffer(nil)
+ fatalOnErr(tmpl.Execute(b, c))
+
+ return b.Bytes()
+}
+
+func fatalOnErr(err error) {
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
+ sideClient := initClient(n3config[network].MorphRPC)
+ balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
+
+ sideActor, err := actor.NewSimple(sideClient, acc)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
+ }
+
+ sideGas := nep17.NewReader(sideActor, balanceHash)
+ accSH := acc.Contract.ScriptHash()
+
+ balance, err := sideGas.BalanceOf(accSH)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("side chain balance: %w", err))
+ }
+
+ ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
+ fixedn.ToString(balance, 12)))
+ if !ok {
+ return
+ }
+
+ amountStr := getString("Enter amount in GAS: ")
+ amount, err := fixedn.FromString(amountStr, 8)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("invalid amount: %w", err))
+ }
+
+ mainClient := initClient(n3config[network].RPC)
+ neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
+
+ mainActor, err := actor.NewSimple(mainClient, acc)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
+ }
+
+ mainGas := nep17.New(mainActor, gas.Hash)
+
+ txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
+ }
+
+ cmd.Print("Waiting for transactions to persist.")
+ tick := time.NewTicker(time.Second / 2)
+ defer tick.Stop()
+
+ timer := time.NewTimer(time.Second * 20)
+ defer timer.Stop()
+
+ at := trigger.Application
+
+loop:
+ for {
+ select {
+ case <-tick.C:
+ _, err := mainClient.GetApplicationLog(txHash, &at)
+ if err == nil {
+ cmd.Print("\n")
+ break loop
+ }
+ cmd.Print(".")
+ case <-timer.C:
+ cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
+ if getConfirmation(false, "Continue configuration? yes/[no]: ") {
+ return
+ }
+ os.Exit(1)
+ }
+ }
+}
+
+func initClient(rpc []string) *rpcclient.Client {
+ var c *rpcclient.Client
+ var err error
+
+ shuffled := make([]string, len(rpc))
+ copy(shuffled, rpc)
+ rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
+
+ for _, endpoint := range shuffled {
+ c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
+ DialTimeout: time.Second * 2,
+ RequestTimeout: time.Second * 5,
+ })
+ if err != nil {
+ continue
+ }
+ if err = c.Init(); err != nil {
+ continue
+ }
+ return c
+ }
+
+ fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
+ panic("unreachable")
+}
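A non-interactive sketch of how this package's pieces fit together: fill `config` directly and render it with `applyTemplate`, the same path `storageConfig` takes after its prompts. `exampleRender` is hypothetical, and every value below is a placeholder.

```go
// exampleRender is not part of the diff; it assumes the storagecfg package
// context above (config, applyTemplate, n3config, defaultControlEndpoint).
func exampleRender() []byte {
	var c config
	c.Wallet.Path = "/path/to/wallet.json"
	c.Wallet.Account = "NExampleAddress" // placeholder NEO address
	c.AnnouncedAddress = "node.example.com:8080"
	c.Endpoint = "localhost:8080"
	c.ControlEndpoint = defaultControlEndpoint
	c.Attribute.Locode = "RU MOW"
	c.MorphRPC = n3config["mainnet"].MorphRPC
	c.BlobstorPath = "/srv/frostfs/blob" // Relay is false, so storage is rendered
	c.MetabasePath = "/srv/frostfs/meta"
	return applyTemplate(c)
}
```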
diff --git a/cmd/frostfs-cli/docs/sessions.md b/cmd/frostfs-cli/docs/sessions.md
index 52c0e9b9b..04563b7af 100644
--- a/cmd/frostfs-cli/docs/sessions.md
+++ b/cmd/frostfs-cli/docs/sessions.md
@@ -72,3 +72,4 @@ All other `object` sub-commands support only static sessions (2).
List of commands supporting sessions (static only):
- `create`
- `delete`
+- `set-eacl`
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 299d0a830..03a987a57 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -9,6 +9,8 @@ import (
"io"
"os"
"slices"
+ "sort"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@@ -76,29 +78,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
- slices.SortFunc(list, cid.ID.Cmp)
+ sort.Slice(list, func(i, j int) bool {
+ lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
+ return strings.Compare(lhs, rhs) < 0
+ })
return list
}
-func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
- cliPrm := &client.PrmContainerListStream{
- XHeaders: prm.XHeaders,
- OwnerID: prm.OwnerID,
- Session: prm.Session,
- }
- rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
- if err != nil {
- return fmt.Errorf("init container list: %w", err)
- }
-
- err = rdr.Iterate(processCnr)
- if err != nil {
- return fmt.Errorf("read container list: %w", err)
- }
-
- return
-}
-
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client
@@ -579,6 +565,13 @@ type HeadObjectPrm struct {
commonObjectPrm
objectAddressPrm
rawPrm
+
+ mainOnly bool
+}
+
+// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API.
+func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) {
+ x.mainOnly = v
}
// HeadObjectRes groups the resulting values of HeadObject operation.
@@ -673,7 +666,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
for {
n, ok = rdr.Read(buf)
- list = append(list, buf[:n]...)
+ for i := range n {
+ list = append(list, buf[i])
+ }
if !ok {
break
}
@@ -684,7 +679,10 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
return nil, fmt.Errorf("read object list: %w", err)
}
- slices.SortFunc(list, oid.ID.Cmp)
+ sort.Slice(list, func(i, j int) bool {
+ lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
+ return strings.Compare(lhs, rhs) < 0
+ })
return &SearchObjectsRes{
ids: list,
@@ -858,8 +856,6 @@ type PatchObjectPrm struct {
ReplaceAttribute bool
- NewSplitHeader *objectSDK.SplitHeader
-
PayloadPatches []PayloadPatch
}
@@ -890,11 +886,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
return nil, fmt.Errorf("init payload reading: %w", err)
}
- if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
- NewSplitHeader: prm.NewSplitHeader,
- NewAttributes: prm.NewAttributes,
- ReplaceAttributes: prm.ReplaceAttribute,
- }) {
+ if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
for _, pp := range prm.PayloadPatches {
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
if err != nil {
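Both `SortedIDList` and `SearchObjects` now order IDs by comparing string encodings through `sort.Slice` instead of `slices.SortFunc` with `Cmp` (Go 1.21+); the resulting order is the same. The pattern in isolation, with plain strings standing in for the `cid.ID`/`oid.ID` values:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	ids := []string{"9dExample", "3XExample", "AaExample"} // stand-ins for encoded IDs
	sort.Slice(ids, func(i, j int) bool {
		return strings.Compare(ids[i], ids[j]) < 0
	})
	fmt.Println(ids) // [3XExample 9dExample AaExample]
}
```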
diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go
index 1eadfa2e1..f7c48b871 100644
--- a/cmd/frostfs-cli/internal/client/sdk.go
+++ b/cmd/frostfs-cli/internal/client/sdk.go
@@ -56,9 +56,8 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
prmDial := client.PrmDial{
Endpoint: addr.URIAddr(),
GRPCDialOptions: []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
+ grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
},
}
if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
diff --git a/cmd/frostfs-cli/internal/common/tracing.go b/cmd/frostfs-cli/internal/common/tracing.go
index 10863ed1e..30c2f2b1a 100644
--- a/cmd/frostfs-cli/internal/common/tracing.go
+++ b/cmd/frostfs-cli/internal/common/tracing.go
@@ -2,7 +2,7 @@ package common
import (
"context"
- "slices"
+ "sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
@@ -45,11 +45,15 @@ func StartClientCommandSpan(cmd *cobra.Command) {
})
commonCmd.ExitOnErr(cmd, "init tracing: %w", err)
- var components []string
+ var components sort.StringSlice
for c := cmd; c != nil; c = c.Parent() {
components = append(components, c.Name())
}
- slices.Reverse(components)
+ for i, j := 0, len(components)-1; i < j; {
+ components.Swap(i, j)
+ i++
+ j--
+ }
operation := strings.Join(components, ".")
ctx, span := tracing.StartSpanFromContext(cmd.Context(), operation)
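The replacement for `slices.Reverse` above reverses the component list in place with a two-index swap loop (through `sort.StringSlice.Swap`). The same loop in plain, runnable form:

```go
package main

import "fmt"

// reverse swaps elements from both ends toward the middle, exactly what the
// sort.StringSlice loop above does.
func reverse(ss []string) {
	for i, j := 0, len(ss)-1; i < j; i, j = i+1, j-1 {
		ss[i], ss[j] = ss[j], ss[i]
	}
}

func main() {
	components := []string{"get", "object", "frostfs-cli"} // collected child to parent
	reverse(components)
	fmt.Println(components) // [frostfs-cli object get], joined as "frostfs-cli.object.get"
}
```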
diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go
index 6ed21e107..88321176f 100644
--- a/cmd/frostfs-cli/internal/commonflags/api.go
+++ b/cmd/frostfs-cli/internal/commonflags/api.go
@@ -9,7 +9,7 @@ const (
TTL = "ttl"
TTLShorthand = ""
TTLDefault = 2
- TTLUsage = "The maximum number of intermediate nodes in the request route"
+ TTLUsage = "TTL value in request meta header"
XHeadersKey = "xhdr"
XHeadersShorthand = "x"
diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go
index fad1f6183..cd46d63eb 100644
--- a/cmd/frostfs-cli/internal/commonflags/flags.go
+++ b/cmd/frostfs-cli/internal/commonflags/flags.go
@@ -28,7 +28,7 @@ const (
RPC = "rpc-endpoint"
RPCShorthand = "r"
RPCDefault = ""
- RPCUsage = "Remote node address (':' or 'grpcs://:')"
+ RPCUsage = "Remote node address (as 'multiaddr' or ':')"
Timeout = "timeout"
TimeoutShorthand = "t"
diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
index f4039283f..c6622da25 100644
--- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
@@ -1,19 +1,45 @@
package apemanager
import (
- "fmt"
+ "encoding/hex"
+ "errors"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
+const (
+ chainIDFlag = "chain-id"
+ chainIDHexFlag = "chain-id-hex"
+ ruleFlag = "rule"
+ pathFlag = "path"
+)
+
+const (
+ targetNameFlag = "target-name"
+ targetNameDesc = "Resource name in APE resource name format"
+ targetTypeFlag = "target-type"
+ targetTypeDesc = "Resource type(container/namespace)"
+)
+
+const (
+ defaultNamespace = ""
+ namespaceTarget = "namespace"
+ containerTarget = "container"
+ userTarget = "user"
+ groupTarget = "group"
+)
+
+var errUnknownTargetType = errors.New("unknown target type")
+
var addCmd = &cobra.Command{
Use: "add",
Short: "Add rule chain for a target",
@@ -24,28 +50,55 @@ var addCmd = &cobra.Command{
}
func parseTarget(cmd *cobra.Command) (ct apeSDK.ChainTarget) {
- t := apeCmd.ParseTarget(cmd)
+ typ, _ := cmd.Flags().GetString(targetTypeFlag)
+ name, _ := cmd.Flags().GetString(targetNameFlag)
- ct.Name = t.Name
+ ct.Name = name
- switch t.Type {
- case engine.Namespace:
+ switch typ {
+ case namespaceTarget:
ct.TargetType = apeSDK.TargetTypeNamespace
- case engine.Container:
+ case containerTarget:
+ var cnr cid.ID
+ commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
ct.TargetType = apeSDK.TargetTypeContainer
- case engine.User:
+ case userTarget:
ct.TargetType = apeSDK.TargetTypeUser
- case engine.Group:
+ case groupTarget:
ct.TargetType = apeSDK.TargetTypeGroup
default:
- commonCmd.ExitOnErr(cmd, "conversion error: %w", fmt.Errorf("unknown type '%c'", t.Type))
+ commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
}
return ct
}
func parseChain(cmd *cobra.Command) apeSDK.Chain {
- c := apeCmd.ParseChain(cmd)
- serialized := c.Bytes()
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
+
+ chainIDRaw := []byte(chainID)
+
+ if hexEncoded {
+ var err error
+ chainIDRaw, err = hex.DecodeString(chainID)
+ commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
+ }
+
+ chain := new(apechain.Chain)
+ chain.ID = apechain.ID(chainIDRaw)
+
+ if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules))
+ } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
+ commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath))
+ } else {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", errors.New("rule is not passed"))
+ }
+
+ cmd.Println("Parsed chain:")
+ util.PrintHumanReadableAPEChain(cmd, chain)
+
+ serialized := chain.Bytes()
return apeSDK.Chain{
Raw: serialized,
}
@@ -74,13 +127,13 @@ func initAddCmd() {
commonflags.Init(addCmd)
ff := addCmd.Flags()
- ff.StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
- ff.String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
- ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc)
+ ff.StringArray(ruleFlag, []string{}, "Rule statement")
+ ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
+ ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
+ ff.String(targetNameFlag, "", targetNameDesc)
+ ff.String(targetTypeFlag, "", targetTypeDesc)
+ _ = addCmd.MarkFlagRequired(targetTypeFlag)
+ ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
- addCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag)
+ addCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag)
}
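After this change `parseChain` builds the chain locally from either `--rule` statements or an encoded file before serializing it for the APE manager. A hedged end-to-end sketch; the chain ID and rule text are illustrative and may not match the APE rule grammar exactly.

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
)

func main() {
	chain := new(apechain.Chain)
	chain.ID = apechain.ID("demo-chain")
	// Rule syntax is handled by util.ParseAPEChain; this statement is illustrative.
	if err := util.ParseAPEChain(chain, []string{"allow Object.Put *"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// chain.Bytes() is the serialized form placed into apeSDK.Chain.Raw.
	fmt.Printf("%x\n", chain.Bytes())
}
```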
diff --git a/cmd/frostfs-cli/modules/ape_manager/list_chain.go b/cmd/frostfs-cli/modules/ape_manager/list_chain.go
index b07ecc52f..a5dd44614 100644
--- a/cmd/frostfs-cli/modules/ape_manager/list_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/list_chain.go
@@ -4,8 +4,8 @@ import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
@@ -35,7 +35,7 @@ func list(cmd *cobra.Command, _ []string) {
for _, respChain := range resp.Chains {
var chain apechain.Chain
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(respChain.Raw))
- apeCmd.PrintHumanReadableAPEChain(cmd, &chain)
+ apeutil.PrintHumanReadableAPEChain(cmd, &chain)
}
}
@@ -43,7 +43,7 @@ func initListCmd() {
commonflags.Init(listCmd)
ff := listCmd.Flags()
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = listCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ ff.String(targetNameFlag, "", targetNameDesc)
+ ff.String(targetTypeFlag, "", targetTypeDesc)
+ _ = listCmd.MarkFlagRequired(targetTypeFlag)
}
diff --git a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
index 136ca81c3..179bd5c9e 100644
--- a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
@@ -1,23 +1,29 @@
package apemanager
import (
+ "encoding/hex"
+ "errors"
+
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
-var removeCmd = &cobra.Command{
- Use: "remove",
- Short: "Remove rule chain for a target",
- Run: remove,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
- commonflags.Bind(cmd)
- },
-}
+var (
+ errEmptyChainID = errors.New("chain id cannot be empty")
+
+ removeCmd = &cobra.Command{
+ Use: "remove",
+ Short: "Remove rule chain for a target",
+ Run: remove,
+ PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ commonflags.Bind(cmd)
+ },
+ }
+)
func remove(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
@@ -25,9 +31,19 @@ func remove(cmd *cobra.Command, _ []string) {
key := key.Get(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
- chainID := apeCmd.ParseChainID(cmd)
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ if chainID == "" {
+ commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID)
+ }
chainIDRaw := []byte(chainID)
+ hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
+ if hexEncoded {
+ var err error
+ chainIDRaw, err = hex.DecodeString(chainID)
+ commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
+ }
+
_, err := cli.APEManagerRemoveChain(cmd.Context(), client_sdk.PrmAPEManagerRemoveChain{
ChainTarget: target,
ChainID: chainIDRaw,
@@ -42,10 +58,9 @@ func initRemoveCmd() {
commonflags.Init(removeCmd)
ff := removeCmd.Flags()
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = removeCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- _ = removeCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
- ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc)
+ ff.String(targetNameFlag, "", targetNameDesc)
+ ff.String(targetTypeFlag, "", targetTypeDesc)
+ _ = removeCmd.MarkFlagRequired(targetTypeFlag)
+ ff.String(chainIDFlag, "", "Chain id")
+ ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
}
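`remove` accepts the chain ID either raw or hex-encoded (`--chain-id-hex`); the decode branch above reduces to this small helper. Names are illustrative, the hex handling matches the hunk.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// chainIDBytes returns the raw chain ID, decoding it from hex when requested.
func chainIDBytes(id string, isHex bool) ([]byte, error) {
	if isHex {
		return hex.DecodeString(id)
	}
	return []byte(id), nil
}

func main() {
	b, _ := chainIDBytes("6d792d636861696e", true)
	fmt.Println(string(b)) // my-chain
}
```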
diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go
index 0927788ba..a86506c37 100644
--- a/cmd/frostfs-cli/modules/bearer/create.go
+++ b/cmd/frostfs-cli/modules/bearer/create.go
@@ -44,7 +44,6 @@ is set to current epoch + n.
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account))
- _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC))
},
}
@@ -82,7 +81,7 @@ func createToken(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)
if iatRelative || expRelative || nvbRelative {
- endpoint := viper.GetString(commonflags.RPC)
+ endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
if len(endpoint) == 0 {
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
}
diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go
index 9632061f1..482c0027e 100644
--- a/cmd/frostfs-cli/modules/bearer/generate_override.go
+++ b/cmd/frostfs-cli/modules/bearer/generate_override.go
@@ -1,20 +1,31 @@
package bearer
import (
+ "errors"
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+ parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
+var (
+ errChainIDCannotBeEmpty = errors.New("chain id cannot be empty")
+ errRuleIsNotParsed = errors.New("rule is not passed")
+)
+
const (
- outputFlag = "output"
+ chainIDFlag = "chain-id"
+ chainIDHexFlag = "chain-id-hex"
+ ruleFlag = "rule"
+ pathFlag = "path"
+ outputFlag = "output"
)
var generateAPEOverrideCmd = &cobra.Command{
@@ -29,7 +40,7 @@ Generated APE override can be dumped to a file in JSON format that is passed to
}
func genereateAPEOverride(cmd *cobra.Command, _ []string) {
- c := apeCmd.ParseChain(cmd)
+ c := parseChain(cmd)
targetCID, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cid cidSDK.ID
@@ -52,7 +63,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" {
- err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
+ err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else {
fmt.Print("\n")
@@ -66,11 +77,39 @@ func init() {
ff.StringP(commonflags.CIDFlag, "", "", "Target container ID.")
_ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.CIDFlag)
- ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement")
- ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format")
- ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain")
- ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.StringArray(ruleFlag, []string{}, "Rule statement")
+ ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
+ ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
+ ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
ff.String(outputFlag, "", "Output path to dump result JSON-encoded APE override")
_ = cobra.MarkFlagFilename(createCmd.Flags(), outputFlag)
}
+
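+// parseChainID reads the chain ID flag and exits with an error when it is empty.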
+func parseChainID(cmd *cobra.Command) apechain.ID {
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ if chainID == "" {
+ commonCmd.ExitOnErr(cmd, "read chain id error: %w",
+ errChainIDCannotBeEmpty)
+ }
+ return apechain.ID(chainID)
+}
+
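+// parseChain assembles an APE chain from the rule statements or an encoded file and attaches the parsed chain ID.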
+func parseChain(cmd *cobra.Command) *apechain.Chain {
+ chain := new(apechain.Chain)
+
+ if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules))
+ } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
+ commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath))
+ } else {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
+ }
+
+ chain.ID = parseChainID(cmd)
+
+ cmd.Println("Parsed chain:")
+ parseutil.PrintHumanReadableAPEChain(cmd, chain)
+
+ return chain
+}
diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go
index 30f995180..f37b169ce 100644
--- a/cmd/frostfs-cli/modules/container/create.go
+++ b/cmd/frostfs-cli/modules/container/create.go
@@ -7,20 +7,22 @@ import (
"strings"
"time"
+ containerApi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- containerApi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
)
var (
+ containerACL string
containerPolicy string
containerAttributes []string
containerAwait bool
@@ -87,6 +89,9 @@ It will be stored in sidechain when inner ring will accepts it.`,
err = parseAttributes(&cnr, containerAttributes)
commonCmd.ExitOnErr(cmd, "", err)
+ var basicACL acl.Basic
+ commonCmd.ExitOnErr(cmd, "decode basic ACL string: %w", basicACL.DecodeString(containerACL))
+
tok := getSession(cmd)
if tok != nil {
@@ -100,6 +105,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
}
cnr.SetPlacementPolicy(*placementPolicy)
+ cnr.SetBasicACL(basicACL)
var syncContainerPrm internalclient.SyncContainerPrm
syncContainerPrm.SetClient(cli)
@@ -157,6 +163,10 @@ func initContainerCreateCmd() {
flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
+
+ flags.StringVar(&containerACL, "basic-acl", acl.NamePrivate, fmt.Sprintf("HEX encoded basic ACL value or keywords like '%s', '%s', '%s'",
+ acl.NamePublicRW, acl.NamePrivate, acl.NamePublicROExtended,
+ ))
flags.StringVarP(&containerPolicy, "policy", "p", "", "QL-encoded or JSON-encoded placement policy or path to file with it")
flags.StringSliceVarP(&containerAttributes, "attributes", "a", nil, "Comma separated pairs of container attributes in form of Key1=Value1,Key2=Value2")
flags.BoolVar(&containerAwait, "await", false, "Block execution until container is persisted")
diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go
index fac6eb2cd..8c4ab14f8 100644
--- a/cmd/frostfs-cli/modules/container/get.go
+++ b/cmd/frostfs-cli/modules/container/get.go
@@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod
cmd.Println("created:", container.CreatedAt(cnr))
cmd.Println("attributes:")
- for key, val := range cnr.Attributes() {
+ cnr.IterateAttributes(func(key, val string) {
cmd.Printf("\t%s=%s\n", key, val)
- }
+ })
cmd.Println("placement policy:")
commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd)))
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index e4a023d91..f01e4db4d 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -6,11 +6,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
// flags of list command.
@@ -54,60 +51,44 @@ var listContainersCmd = &cobra.Command{
var prm internalclient.ListContainersPrm
prm.SetClient(cli)
- prm.OwnerID = idUser
+ prm.Account = idUser
+
+ res, err := internalclient.ListContainers(cmd.Context(), prm)
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
- var containerIDs []cid.ID
-
- err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
- printContainer(cmd, prmGet, id)
- return false
- })
- if err == nil {
- return
- }
-
- if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
- res, err := internalclient.ListContainers(cmd.Context(), prm)
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
- containerIDs = res.SortedIDList()
- } else {
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
- }
+ containerIDs := res.SortedIDList()
for _, cnrID := range containerIDs {
- printContainer(cmd, prmGet, cnrID)
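+ // Print only the container ID unless a name filter or attribute output is requested.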
+ if flagVarListName == "" && !flagVarListPrintAttr {
+ cmd.Println(cnrID.String())
+ continue
+ }
+
+ prmGet.ClientParams.ContainerID = &cnrID
+ res, err := internalclient.GetContainer(cmd.Context(), prmGet)
+ if err != nil {
+ cmd.Printf(" failed to read attributes: %v\n", err)
+ continue
+ }
+
+ cnr := res.Container()
+ if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
+ continue
+ }
+ cmd.Println(cnrID.String())
+
+ if flagVarListPrintAttr {
+ cnr.IterateUserAttributes(func(key, val string) {
+ cmd.Printf(" %s: %s\n", key, val)
+ })
+ }
}
},
}
-func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
- if flagVarListName == "" && !flagVarListPrintAttr {
- cmd.Println(id.String())
- return
- }
-
- prmGet.ClientParams.ContainerID = &id
- res, err := internalclient.GetContainer(cmd.Context(), prmGet)
- if err != nil {
- cmd.Printf(" failed to read attributes: %v\n", err)
- return
- }
-
- cnr := res.Container()
- if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
- return
- }
- cmd.Println(id.String())
-
- if flagVarListPrintAttr {
- for key, val := range cnr.Attributes() {
- cmd.Printf(" %s: %s\n", key, val)
- }
- }
-}
-
func initContainerListContainersCmd() {
commonflags.Init(listContainersCmd)
diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go
index d5850359d..ff2f8cf45 100644
--- a/cmd/frostfs-cli/modules/container/list_objects.go
+++ b/cmd/frostfs-cli/modules/container/list_objects.go
@@ -1,6 +1,9 @@
package container
import (
+ "strings"
+
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@@ -64,8 +67,14 @@ var listContainerObjectsCmd = &cobra.Command{
resHead, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
- for _, attr := range resHead.Header().UserAttributes() {
- cmd.Printf(" %s: %s\n", attr.Key(), attr.Value())
+ attrs := resHead.Header().Attributes()
+ for i := range attrs {
+ attrKey := attrs[i].Key()
+ if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, v2object.SysAttributePrefixNeoFS) {
+ // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
+ // Use dedicated method to skip system attributes.
+ cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value())
+ }
}
} else {
cmd.Printf(" failed to read attributes: %v\n", err)
diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go
index cf4862b4a..095ab6438 100644
--- a/cmd/frostfs-cli/modules/container/policy_playground.go
+++ b/cmd/frostfs-cli/modules/container/policy_playground.go
@@ -1,13 +1,12 @@
package container
import (
+ "bufio"
"encoding/hex"
"encoding/json"
- "errors"
"fmt"
- "maps"
+ "io"
"os"
- "slices"
"strings"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@@ -15,22 +14,20 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/chzyer/readline"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
type policyPlaygroundREPL struct {
- cmd *cobra.Command
- nodes map[string]netmap.NodeInfo
- console *readline.Instance
+ cmd *cobra.Command
+ nodes map[string]netmap.NodeInfo
}
-func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
+func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{
cmd: cmd,
nodes: map[string]netmap.NodeInfo{},
- }
+ }, nil
}
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@@ -40,10 +37,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error {
i := 1
for id, node := range repl.nodes {
var attrs []string
- for k, v := range node.Attributes() {
+ node.IterateAttributes(func(k, v string) {
attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
- }
- fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
+ })
+ fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
i++
}
return nil
@@ -150,29 +147,12 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error {
for _, node := range ns {
ids = append(ids, hex.EncodeToString(node.PublicKey()))
}
- fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids)
+ fmt.Printf("\t%2d: %v\n", i+1, ids)
}
return nil
}
-func (repl *policyPlaygroundREPL) handleHelp(args []string) error {
- if len(args) != 0 {
- if _, ok := commands[args[0]]; !ok {
- return fmt.Errorf("unknown command: %q", args[0])
- }
- fmt.Fprintln(repl.console, commands[args[0]].usage)
- return nil
- }
-
- commandList := slices.Collect(maps.Keys(commands))
- slices.Sort(commandList)
- for _, command := range commandList {
- fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion)
- }
- return nil
-}
-
func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
var nm netmap.NetMap
var nodes []netmap.NodeInfo
@@ -183,105 +163,6 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
return nm
}
-type commandDescription struct {
- descriprion string
- usage string
-}
-
-var commands = map[string]commandDescription{
- "list": {
- descriprion: "Display all nodes in the netmap",
- usage: `Display all nodes in the netmap
-Example of usage:
- list
- 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
- 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
-`,
- },
-
- "ls": {
- descriprion: "Display all nodes in the netmap",
- usage: `Display all nodes in the netmap
-Example of usage:
- ls
- 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
- 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
-`,
- },
-
- "add": {
- descriprion: "Add a new node: add attr=value",
- usage: `Add a new node
-Example of usage:
- add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`,
- },
-
- "load": {
- descriprion: "Load netmap from file: load ",
- usage: `Load netmap from file
-Example of usage:
- load "netmap.json"
-File format (netmap.json):
-{
- "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": {
- "continent": "Europe",
- "country": "Poland"
- },
- "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": {
- "continent": "Antarctica",
- "country": "Heard Island"
- }
-}`,
- },
-
- "remove": {
- descriprion: "Remove a node: remove ",
- usage: `Remove a node
-Example of usage:
- remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
- },
-
- "rm": {
- descriprion: "Remove a node: rm ",
- usage: `Remove a node
-Example of usage:
- rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
- },
-
- "eval": {
- descriprion: "Evaluate a policy: eval ",
- usage: `Evaluate a policy
-Example of usage:
- eval REP 2`,
- },
-
- "help": {
- descriprion: "Show available commands",
- },
-}
-
-func (repl *policyPlaygroundREPL) handleCommand(args []string) error {
- if len(args) == 0 {
- return nil
- }
-
- switch args[0] {
- case "list", "ls":
- return repl.handleLs(args[1:])
- case "add":
- return repl.handleAdd(args[1:])
- case "load":
- return repl.handleLoad(args[1:])
- case "remove", "rm":
- return repl.handleRemove(args[1:])
- case "eval":
- return repl.handleEval(args[1:])
- case "help":
- return repl.handleHelp(args[1:])
- }
- return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0])
-}
-
func (repl *policyPlaygroundREPL) run() error {
if len(viper.GetString(commonflags.RPC)) > 0 {
key := key.GetOrGenerate(repl.cmd)
@@ -299,51 +180,36 @@ func (repl *policyPlaygroundREPL) run() error {
}
}
- if len(viper.GetString(netmapConfigPath)) > 0 {
- err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)})
- commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err)
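+ // cmdHandlers maps REPL command names (including aliases) to their handlers.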
+ cmdHandlers := map[string]func([]string) error{
+ "list": repl.handleLs,
+ "ls": repl.handleLs,
+ "add": repl.handleAdd,
+ "load": repl.handleLoad,
+ "remove": repl.handleRemove,
+ "rm": repl.handleRemove,
+ "eval": repl.handleEval,
}
-
- var cfgCompleter []readline.PrefixCompleterInterface
- var helpSubItems []readline.PrefixCompleterInterface
-
- for name := range commands {
- if name != "help" {
- cfgCompleter = append(cfgCompleter, readline.PcItem(name))
- helpSubItems = append(helpSubItems, readline.PcItem(name))
- }
- }
-
- cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...))
- completer := readline.NewPrefixCompleter(cfgCompleter...)
- rl, err := readline.NewEx(&readline.Config{
- Prompt: "> ",
- InterruptPrompt: "^C",
- AutoComplete: completer,
- })
- if err != nil {
- return fmt.Errorf("error initializing readline: %w", err)
- }
- repl.console = rl
- defer rl.Close()
-
- var exit bool
- for {
- line, err := rl.Readline()
+ for reader := bufio.NewReader(os.Stdin); ; {
+ fmt.Print("> ")
+ line, err := reader.ReadString('\n')
if err != nil {
- if errors.Is(err, readline.ErrInterrupt) {
- if exit {
- return nil
- }
- exit = true
- continue
+ if err == io.EOF {
+ return nil
}
- return fmt.Errorf("reading line: %w", err)
+ return fmt.Errorf("reading line: %v", err)
}
- exit = false
-
- if err := repl.handleCommand(strings.Fields(line)); err != nil {
- fmt.Fprintf(repl.console, "error: %v\n", err)
+ parts := strings.Fields(line)
+ if len(parts) == 0 {
+ continue
+ }
+ cmd := parts[0]
+ handler, exists := cmdHandlers[cmd]
+ if exists {
+ if err := handler(parts[1:]); err != nil {
+ fmt.Printf("error: %v\n", err)
+ }
+ } else {
+ fmt.Printf("error: unknown command %q\n", cmd)
}
}
}
@@ -354,19 +220,12 @@ var policyPlaygroundCmd = &cobra.Command{
Long: `A REPL for testing placement policies.
If a wallet and an endpoint are provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
Run: func(cmd *cobra.Command, _ []string) {
- repl := newPolicyPlaygroundREPL(cmd)
+ repl, err := newPolicyPlaygroundREPL(cmd)
+ commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},
}
-const (
- netmapConfigPath = "netmap-config"
- netmapConfigUsage = "Path to the netmap configuration file"
-)
-
func initContainerPolicyPlaygroundCmd() {
commonflags.Init(policyPlaygroundCmd)
- policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage)
-
- _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath))
}
diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go
index 42f229ad9..a22d0525d 100644
--- a/cmd/frostfs-cli/modules/control/add_rule.go
+++ b/cmd/frostfs-cli/modules/control/add_rule.go
@@ -1,14 +1,23 @@
package control
import (
+ "encoding/hex"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
+const (
+ ruleFlag = "rule"
+ pathFlag = "path"
+)
+
var addRuleCmd = &cobra.Command{
Use: "add-rule",
Short: "Add local override",
@@ -22,12 +31,41 @@ control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ..
Run: addRule,
}
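+// parseChain builds an apechain.Chain from the chain-id, rule and path flags, decoding the chain ID from hex when requested.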
+func parseChain(cmd *cobra.Command) *apechain.Chain {
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
+
+ chainIDRaw := []byte(chainID)
+
+ if hexEncoded {
+ var err error
+ chainIDRaw, err = hex.DecodeString(chainID)
+ commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
+ }
+
+ chain := new(apechain.Chain)
+ chain.ID = apechain.ID(chainIDRaw)
+
+ if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
+ commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules))
+ } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
+ commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath))
+ } else {
+ commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed"))
+ }
+
+ cmd.Println("Parsed chain:")
+ util.PrintHumanReadableAPEChain(cmd, chain)
+
+ return chain
+}
+
func addRule(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
target := parseTarget(cmd)
- parsed := apeCmd.ParseChain(cmd)
+ parsed := parseChain(cmd)
req := &control.AddChainLocalOverrideRequest{
Body: &control.AddChainLocalOverrideRequest_Body{
@@ -56,13 +94,13 @@ func initControlAddRuleCmd() {
initControlFlags(addRuleCmd)
ff := addRuleCmd.Flags()
- ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement")
- ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format")
- ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain")
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.StringArray(ruleFlag, []string{}, "Rule statement")
+ ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
+ ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
+ ff.String(targetNameFlag, "", targetNameDesc)
+ ff.String(targetTypeFlag, "", targetTypeDesc)
+ _ = addRuleCmd.MarkFlagRequired(targetTypeFlag)
+ ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
- addRuleCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag)
+ addRuleCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/detach_shards.go b/cmd/frostfs-cli/modules/control/detach_shards.go
index 025a6e561..5e5b60c3d 100644
--- a/cmd/frostfs-cli/modules/control/detach_shards.go
+++ b/cmd/frostfs-cli/modules/control/detach_shards.go
@@ -1,10 +1,10 @@
package control
import (
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/doctor.go b/cmd/frostfs-cli/modules/control/doctor.go
index 632cdd6a7..13bb81a0a 100644
--- a/cmd/frostfs-cli/modules/control/doctor.go
+++ b/cmd/frostfs-cli/modules/control/doctor.go
@@ -1,10 +1,10 @@
package control
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/drop_objects.go b/cmd/frostfs-cli/modules/control/drop_objects.go
index dcc1c1229..8c0bb2332 100644
--- a/cmd/frostfs-cli/modules/control/drop_objects.go
+++ b/cmd/frostfs-cli/modules/control/drop_objects.go
@@ -1,10 +1,10 @@
package control
import (
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go
new file mode 100644
index 000000000..458e4cc0b
--- /dev/null
+++ b/cmd/frostfs-cli/modules/control/evacuate_shard.go
@@ -0,0 +1,56 @@
+package control
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "github.com/spf13/cobra"
+)
+
+const ignoreErrorsFlag = "no-errors"
+
+var evacuateShardCmd = &cobra.Command{
+ Use: "evacuate",
+ Short: "Evacuate objects from shard",
+ Long: "Evacuate objects from shard to other shards",
+ Run: evacuateShard,
+ Deprecated: "use frostfs-cli control shards evacuation start",
+}
+
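+// evacuateShard sends an EvacuateShard control request for the selected shards and reports the number of moved objects.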
+func evacuateShard(cmd *cobra.Command, _ []string) {
+ pk := key.Get(cmd)
+
+ req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
+ req.Body.Shard_ID = getShardIDList(cmd)
+ req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
+
+ signRequest(cmd, pk, req)
+
+ cli := getClient(cmd, pk)
+
+ var resp *control.EvacuateShardResponse
+ var err error
+ err = cli.ExecRaw(func(client *client.Client) error {
+ resp, err = control.EvacuateShard(client, req)
+ return err
+ })
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
+
+ verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+ cmd.Println("Shard has successfully been evacuated.")
+}
+
+func initControlEvacuateShardCmd() {
+ initControlFlags(evacuateShardCmd)
+
+ flags := evacuateShardCmd.Flags()
+ flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
+ flags.Bool(shardAllFlag, false, "Process all shards")
+ flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
+
+ evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
+}
diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go
index b8d7eb046..6fa5ed75c 100644
--- a/cmd/frostfs-cli/modules/control/evacuation.go
+++ b/cmd/frostfs-cli/modules/control/evacuation.go
@@ -7,24 +7,19 @@ import (
"sync/atomic"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
const (
- awaitFlag = "await"
- noProgressFlag = "no-progress"
- scopeFlag = "scope"
- repOneOnlyFlag = "rep-one-only"
- ignoreErrorsFlag = "no-errors"
-
- containerWorkerCountFlag = "container-worker-count"
- objectWorkerCountFlag = "object-worker-count"
+ awaitFlag = "await"
+ noProgressFlag = "no-progress"
+ scopeFlag = "scope"
scopeAll = "all"
scopeObjects = "objects"
@@ -69,18 +64,12 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
- containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag)
- objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag)
- repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag)
req := &control.StartShardEvacuationRequest{
Body: &control.StartShardEvacuationRequest_Body{
- Shard_ID: getShardIDList(cmd),
- IgnoreErrors: ignoreErrors,
- Scope: getEvacuationScope(cmd),
- ContainerWorkerCount: containerWorkerCount,
- ObjectWorkerCount: objectWorkerCount,
- RepOneOnly: repOneOnly,
+ Shard_ID: getShardIDList(cmd),
+ IgnoreErrors: ignoreErrors,
+ Scope: getEvacuationScope(cmd),
},
}
@@ -296,7 +285,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
leftMinutes := int(leftSeconds / 60)
- fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes)
+ sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
}
func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@@ -305,20 +294,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
hour := int(duration.Seconds() / 3600)
minute := int(duration.Seconds()/60) % 60
second := int(duration.Seconds()) % 60
- fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second)
+ sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
}
}
func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if resp.GetBody().GetStartedAt() != nil {
startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
- fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339))
+ sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
}
}
func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if len(resp.GetBody().GetErrorMessage()) > 0 {
- fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage())
+ sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
}
}
@@ -332,7 +321,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes
default:
status = "undefined"
}
- fmt.Fprintf(sb, " Status: %s.", status)
+ sb.WriteString(fmt.Sprintf(" Status: %s.", status))
}
func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@@ -350,14 +339,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
}
func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
- fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
+ sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
resp.GetBody().GetEvacuatedObjects(),
resp.GetBody().GetTotalObjects(),
resp.GetBody().GetFailedObjects(),
resp.GetBody().GetSkippedObjects(),
resp.GetBody().GetEvacuatedTrees(),
resp.GetBody().GetTotalTrees(),
- resp.GetBody().GetFailedTrees())
+ resp.GetBody().GetFailedTrees()))
}
func initControlEvacuationShardCmd() {
@@ -382,9 +371,6 @@ func initControlStartEvacuationShardCmd() {
flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll))
flags.Bool(awaitFlag, false, "Block execution until evacuation is completed")
flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
- flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers")
- flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers")
- flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'")
startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/flush_cache.go b/cmd/frostfs-cli/modules/control/flush_cache.go
index 280aacfad..541961903 100644
--- a/cmd/frostfs-cli/modules/control/flush_cache.go
+++ b/cmd/frostfs-cli/modules/control/flush_cache.go
@@ -1,10 +1,10 @@
package control
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go
index 4da903a9a..050cf165c 100644
--- a/cmd/frostfs-cli/modules/control/get_rule.go
+++ b/cmd/frostfs-cli/modules/control/get_rule.go
@@ -3,11 +3,11 @@ package control
import (
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
@@ -24,8 +24,8 @@ func getRule(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
- chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag)
- hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag)
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
+ hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
if hexEncoded {
chainIDBytes, err := hex.DecodeString(chainID)
@@ -56,16 +56,16 @@ func getRule(cmd *cobra.Command, _ []string) {
var chain apechain.Chain
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain()))
- apecmd.PrintHumanReadableAPEChain(cmd, &chain)
+ util.PrintHumanReadableAPEChain(cmd, &chain)
}
func initControGetRuleCmd() {
initControlFlags(getRuleCmd)
ff := getRuleCmd.Flags()
- ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc)
- ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc)
- _ = getRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag)
- ff.String(apecmd.ChainIDFlag, "", "Chain id")
- ff.Bool(apecmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.String(targetNameFlag, "", targetNameDesc)
+ ff.String(targetTypeFlag, "", targetTypeDesc)
+ _ = getRuleCmd.MarkFlagRequired(targetTypeFlag)
+ ff.String(chainIDFlag, "", "Chain id")
+ ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
}
diff --git a/cmd/frostfs-cli/modules/control/healthcheck.go b/cmd/frostfs-cli/modules/control/healthcheck.go
index 1d4441f1e..2241a403f 100644
--- a/cmd/frostfs-cli/modules/control/healthcheck.go
+++ b/cmd/frostfs-cli/modules/control/healthcheck.go
@@ -3,11 +3,11 @@ package control
import (
"os"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/ir_healthcheck.go b/cmd/frostfs-cli/modules/control/ir_healthcheck.go
index 373f21c30..4f272c1b4 100644
--- a/cmd/frostfs-cli/modules/control/ir_healthcheck.go
+++ b/cmd/frostfs-cli/modules/control/ir_healthcheck.go
@@ -3,12 +3,12 @@ package control
import (
"os"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/ir_remove_container.go b/cmd/frostfs-cli/modules/control/ir_remove_container.go
index 460e299e5..a66d7e06d 100644
--- a/cmd/frostfs-cli/modules/control/ir_remove_container.go
+++ b/cmd/frostfs-cli/modules/control/ir_remove_container.go
@@ -1,13 +1,13 @@
package control
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
diff --git a/cmd/frostfs-cli/modules/control/ir_remove_node.go b/cmd/frostfs-cli/modules/control/ir_remove_node.go
index 2fe686d63..412dc7934 100644
--- a/cmd/frostfs-cli/modules/control/ir_remove_node.go
+++ b/cmd/frostfs-cli/modules/control/ir_remove_node.go
@@ -4,11 +4,11 @@ import (
"encoding/hex"
"errors"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
index 5f09e92c1..6965b5dca 100644
--- a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
+++ b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
@@ -1,11 +1,11 @@
package control
import (
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go
index a6c65d083..f5fc27bda 100644
--- a/cmd/frostfs-cli/modules/control/list_rules.go
+++ b/cmd/frostfs-cli/modules/control/list_rules.go
@@ -1,16 +1,18 @@
package control
import (
+ "errors"
"fmt"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "github.com/nspcc-dev/neo-go/cli/input"
"github.com/spf13/cobra"
)
@@ -21,25 +23,65 @@ var listRulesCmd = &cobra.Command{
Run: listRules,
}
-var engineToControlSvcType = map[policyengine.TargetType]control.ChainTarget_TargetType{
- policyengine.Namespace: control.ChainTarget_NAMESPACE,
- policyengine.Container: control.ChainTarget_CONTAINER,
- policyengine.User: control.ChainTarget_USER,
- policyengine.Group: control.ChainTarget_GROUP,
-}
+const (
+ defaultNamespace = "root"
+ namespaceTarget = "namespace"
+ containerTarget = "container"
+ userTarget = "user"
+ groupTarget = "group"
+)
+
+const (
+ targetNameFlag = "target-name"
+ targetNameDesc = "Resource name in APE resource name format"
+ targetTypeFlag = "target-type"
+ targetTypeDesc = "Resource type(container/namespace)"
+)
+
+var (
+ errSettingDefaultValueWasDeclined = errors.New("setting default value was declined")
+ errUnknownTargetType = errors.New("unknown target type")
+)
func parseTarget(cmd *cobra.Command) *control.ChainTarget {
- target := apeCmd.ParseTarget(cmd)
-
- typ, ok := engineToControlSvcType[target.Type]
- if !ok {
- commonCmd.ExitOnErr(cmd, "%w", fmt.Errorf("unknown type '%c", target.Type))
- }
-
- return &control.ChainTarget{
- Name: target.Name,
- Type: typ,
+ typ, _ := cmd.Flags().GetString(targetTypeFlag)
+ name, _ := cmd.Flags().GetString(targetNameFlag)
+ switch typ {
+ case namespaceTarget:
+ if name == "" {
+ ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace))
+ commonCmd.ExitOnErr(cmd, "read line error: %w", err)
+ ln = strings.ToLower(ln)
+ if len(ln) > 0 && (ln[0] == 'n') {
+ commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined)
+ }
+ name = defaultNamespace
+ }
+ return &control.ChainTarget{
+ Name: name,
+ Type: control.ChainTarget_NAMESPACE,
+ }
+ case containerTarget:
+ var cnr cid.ID
+ commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
+ return &control.ChainTarget{
+ Name: name,
+ Type: control.ChainTarget_CONTAINER,
+ }
+ case userTarget:
+ return &control.ChainTarget{
+ Name: name,
+ Type: control.ChainTarget_USER,
+ }
+ case groupTarget:
+ return &control.ChainTarget{
+ Name: name,
+ Type: control.ChainTarget_GROUP,
+ }
+ default:
+ commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
}
+ return nil
}
func listRules(cmd *cobra.Command, _ []string) {
@@ -75,7 +117,7 @@ func listRules(cmd *cobra.Command, _ []string) {
for _, c := range chains {
var chain apechain.Chain
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(c))
- apeCmd.PrintHumanReadableAPEChain(cmd, &chain)
+ util.PrintHumanReadableAPEChain(cmd, &chain)
}
}
@@ -83,7 +125,7 @@ func initControlListRulesCmd() {
initControlFlags(listRulesCmd)
ff := listRulesCmd.Flags()
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = listRulesCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
+ ff.String(targetNameFlag, "", targetNameDesc)
+ ff.String(targetTypeFlag, "", targetTypeDesc)
+ _ = listRulesCmd.MarkFlagRequired(targetTypeFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go
index 3142d02e7..6a988c355 100644
--- a/cmd/frostfs-cli/modules/control/list_targets.go
+++ b/cmd/frostfs-cli/modules/control/list_targets.go
@@ -2,20 +2,26 @@ package control
import (
"bytes"
+ "crypto/sha256"
"fmt"
"strconv"
"text/tabwriter"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
+const (
+ chainNameFlag = "chain-name"
+ chainNameFlagUsage = "Chain name(ingress|s3)"
+)
+
var listTargetsCmd = &cobra.Command{
Use: "list-targets",
Short: "List local targets",
@@ -26,11 +32,15 @@ var listTargetsCmd = &cobra.Command{
func listTargets(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
- chainName := apeCmd.ParseChainName(cmd)
+ var cnr cid.ID
+ chainName, _ := cmd.Flags().GetString(chainNameFlag)
+
+ rawCID := make([]byte, sha256.Size)
+ cnr.Encode(rawCID)
req := &control.ListTargetsLocalOverridesRequest{
Body: &control.ListTargetsLocalOverridesRequest_Body{
- ChainName: string(chainName),
+ ChainName: chainName,
},
}
@@ -62,7 +72,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write([]byte("#\tName\tType\n"))
for i, t := range targets {
- _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
}
_ = tw.Flush()
cmd.Print(buf.String())
@@ -72,7 +82,7 @@ func initControlListTargetsCmd() {
initControlFlags(listTargetsCmd)
ff := listTargetsCmd.Flags()
- ff.String(apeCmd.ChainNameFlag, "", apeCmd.ChainNameFlagDesc)
+ ff.String(chainNameFlag, "", chainNameFlagUsage)
- _ = cobra.MarkFlagRequired(ff, apeCmd.ChainNameFlag)
+ _ = cobra.MarkFlagRequired(ff, chainNameFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go
deleted file mode 100644
index 4cb4be539..000000000
--- a/cmd/frostfs-cli/modules/control/locate.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package control
-
-import (
- "bytes"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
- "github.com/spf13/cobra"
-)
-
-const (
- FullInfoFlag = "full"
- FullInfoFlagUsage = "Print full ShardInfo."
-)
-
-var locateObjectCmd = &cobra.Command{
- Use: "locate-object",
- Short: "List shards storing the object",
- Long: "List shards storing the object",
- Run: locateObject,
-}
-
-func initControlLocateObjectCmd() {
- initControlFlags(locateObjectCmd)
-
- flags := locateObjectCmd.Flags()
-
- flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
-
- flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
- _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
-
- flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
- flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
-}
-
-func locateObject(cmd *cobra.Command, _ []string) {
- var cnr cid.ID
- var obj oid.ID
-
- _ = object.ReadObjectAddress(cmd, &cnr, &obj)
-
- pk := key.Get(cmd)
-
- body := new(control.ListShardsForObjectRequest_Body)
- body.SetContainerId(cnr.EncodeToString())
- body.SetObjectId(obj.EncodeToString())
- req := new(control.ListShardsForObjectRequest)
- req.SetBody(body)
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var err error
- var resp *control.ListShardsForObjectResponse
- err = cli.ExecRaw(func(client *rawclient.Client) error {
- resp, err = control.ListShardsForObject(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- shardIDs := resp.GetBody().GetShard_ID()
-
- isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
- if !isFull {
- for _, id := range shardIDs {
- cmd.Println(base58.Encode(id))
- }
- return
- }
-
- // get full shard info
- listShardsReq := new(control.ListShardsRequest)
- listShardsReq.SetBody(new(control.ListShardsRequest_Body))
- signRequest(cmd, pk, listShardsReq)
- var listShardsResp *control.ListShardsResponse
- err = cli.ExecRaw(func(client *rawclient.Client) error {
- listShardsResp, err = control.ListShards(client, listShardsReq)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
-
- shards := listShardsResp.GetBody().GetShards()
- sortShardsByID(shards)
- shards = filterShards(shards, shardIDs)
-
- isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
- if isJSON {
- prettyPrintShardsJSON(cmd, shards)
- } else {
- prettyPrintShards(cmd, shards)
- }
-}
-
-func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
- var res []control.ShardInfo
- for _, id := range ids {
- for _, inf := range info {
- if bytes.Equal(inf.Shard_ID, id) {
- res = append(res, inf)
- }
- }
- }
- return res
-}
diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go
index 3df12a15d..e2b408712 100644
--- a/cmd/frostfs-cli/modules/control/rebuild_shards.go
+++ b/cmd/frostfs-cli/modules/control/rebuild_shards.go
@@ -3,10 +3,10 @@ package control
import (
"fmt"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/remove_rule.go b/cmd/frostfs-cli/modules/control/remove_rule.go
index 036317bcb..4189ea76b 100644
--- a/cmd/frostfs-cli/modules/control/remove_rule.go
+++ b/cmd/frostfs-cli/modules/control/remove_rule.go
@@ -4,14 +4,19 @@ import (
"encoding/hex"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
+const (
+ chainIDFlag = "chain-id"
+ chainIDHexFlag = "chain-id-hex"
+ allFlag = "all"
+)
+
var (
errEmptyChainID = errors.New("chain id cannot be empty")
@@ -25,8 +30,8 @@ var (
func removeRule(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
- hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag)
- removeAll, _ := cmd.Flags().GetBool(apecmd.AllFlag)
+ hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
+ removeAll, _ := cmd.Flags().GetBool(allFlag)
if removeAll {
req := &control.RemoveChainLocalOverridesByTargetRequest{
Body: &control.RemoveChainLocalOverridesByTargetRequest_Body{
@@ -47,7 +52,7 @@ func removeRule(cmd *cobra.Command, _ []string) {
return
}
- chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag)
+ chainID, _ := cmd.Flags().GetString(chainIDFlag)
if chainID == "" {
commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID)
}
@@ -87,11 +92,11 @@ func initControlRemoveRuleCmd() {
initControlFlags(removeRuleCmd)
ff := removeRuleCmd.Flags()
- ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc)
- ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc)
- _ = removeRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag)
- ff.String(apecmd.ChainIDFlag, "", apecmd.ChainIDFlagDesc)
- ff.Bool(apecmd.ChainIDHexFlag, false, apecmd.ChainIDHexFlagDesc)
- ff.Bool(apecmd.AllFlag, false, "Remove all chains")
- removeRuleCmd.MarkFlagsMutuallyExclusive(apecmd.AllFlag, apecmd.ChainIDFlag)
+ ff.String(targetNameFlag, "", targetNameDesc)
+ ff.String(targetTypeFlag, "", targetTypeDesc)
+ _ = removeRuleCmd.MarkFlagRequired(targetTypeFlag)
+ ff.String(chainIDFlag, "", "Chain id")
+ ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
+ ff.Bool(allFlag, false, "Remove all chains")
+ removeRuleCmd.MarkFlagsMutuallyExclusive(allFlag, chainIDFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go
index 3abfe80cb..b20d3618e 100644
--- a/cmd/frostfs-cli/modules/control/root.go
+++ b/cmd/frostfs-cli/modules/control/root.go
@@ -39,7 +39,6 @@ func init() {
listRulesCmd,
getRuleCmd,
listTargetsCmd,
- locateObjectCmd,
)
initControlHealthCheckCmd()
@@ -53,5 +52,4 @@ func init() {
initControlListRulesCmd()
initControGetRuleCmd()
initControlListTargetsCmd()
- initControlLocateObjectCmd()
}
diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go
index 26a1ba883..a107b2b53 100644
--- a/cmd/frostfs-cli/modules/control/set_netmap_status.go
+++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go
@@ -6,12 +6,12 @@ import (
"fmt"
"time"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
@@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
var resp *control.GetNetmapStatusResponse
var err error
err = cli.ExecRaw(func(client *rawclient.Client) error {
- resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
+ resp, err = control.GetNetmapStatus(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index 3483f5d62..329cb9100 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -13,6 +13,7 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
+ shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
@@ -22,6 +23,7 @@ func initControlShardsCmd() {
initControlShardsListCmd()
initControlSetShardModeCmd()
+ initControlEvacuateShardCmd()
initControlEvacuationShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()
diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go
index 40d6628ee..a81034a9e 100644
--- a/cmd/frostfs-cli/modules/control/shards_list.go
+++ b/cmd/frostfs-cli/modules/control/shards_list.go
@@ -7,11 +7,11 @@ import (
"sort"
"strings"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go
index 8fe01ba30..dd0d77748 100644
--- a/cmd/frostfs-cli/modules/control/shards_set_mode.go
+++ b/cmd/frostfs-cli/modules/control/shards_set_mode.go
@@ -6,10 +6,10 @@ import (
"slices"
"strings"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/synchronize_tree.go b/cmd/frostfs-cli/modules/control/synchronize_tree.go
index 1e4575f49..5f2e4da96 100644
--- a/cmd/frostfs-cli/modules/control/synchronize_tree.go
+++ b/cmd/frostfs-cli/modules/control/synchronize_tree.go
@@ -4,12 +4,12 @@ import (
"crypto/sha256"
"errors"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/util.go b/cmd/frostfs-cli/modules/control/util.go
index 41d9dbf8a..ef547681f 100644
--- a/cmd/frostfs-cli/modules/control/util.go
+++ b/cmd/frostfs-cli/modules/control/util.go
@@ -4,11 +4,11 @@ import (
"crypto/ecdsa"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"github.com/spf13/cobra"
diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go
index d0c9a641b..ffe9009ab 100644
--- a/cmd/frostfs-cli/modules/control/writecache.go
+++ b/cmd/frostfs-cli/modules/control/writecache.go
@@ -1,10 +1,10 @@
package control
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
@@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
var sealWritecacheShardCmd = &cobra.Command{
Use: "seal",
Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
- Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
+ Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
Run: sealWritecache,
}
diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
index 5da66dcd9..b6ec48f35 100644
--- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go
+++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
@@ -49,24 +49,24 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
var stateWord string
- switch i.Status() {
+ switch {
default:
stateWord = ""
- case netmap.Online:
+ case i.IsOnline():
stateWord = "online"
- case netmap.Offline:
+ case i.IsOffline():
stateWord = "offline"
- case netmap.Maintenance:
+ case i.IsMaintenance():
stateWord = "maintenance"
}
cmd.Println("state:", stateWord)
- for s := range i.NetworkEndpoints() {
+ netmap.IterateNetworkEndpoints(i, func(s string) {
cmd.Println("address:", s)
- }
+ })
- for key, value := range i.Attributes() {
+ i.IterateAttributes(func(key, value string) {
cmd.Printf("attribute: %s=%s\n", key, value)
- }
+ })
}
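
Editor's note: this hunk reverts from Go 1.23 range-over-func iterators (`i.NetworkEndpoints()`, `i.Attributes()`) back to callback-based iteration; the same callback style, with a bool return to stop early, appears again in nodes.go below. A toy sketch of the pattern, using made-up types rather than the SDK's:

```go
// Toy sketch (not the SDK types): the reverted SDK iterates with
// callbacks instead of Go 1.23 range-over-func iterators.
package main

import "fmt"

type nodeInfo struct{ endpoints []string }

// Callback style, as used after the revert; returning true from the
// callback stops iteration early.
func (n nodeInfo) IterateNetworkEndpoints(f func(string) bool) {
	for _, e := range n.endpoints {
		if f(e) {
			return
		}
	}
}

func main() {
	n := nodeInfo{endpoints: []string{"grpc://10.0.0.1:8080", "grpc://10.0.0.2:8080"}}
	n.IterateNetworkEndpoints(func(s string) bool {
		fmt.Println("address:", s)
		return false // continue
	})
}
```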
diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go
index 08a9ac4c8..e4e9cddb8 100644
--- a/cmd/frostfs-cli/modules/object/delete.go
+++ b/cmd/frostfs-cli/modules/object/delete.go
@@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
}
- objAddr = ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr = readObjectAddress(cmd, &cnr, &obj)
}
pk := key.GetOrGenerate(cmd)
diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go
index 7312f5384..f1edccba2 100644
--- a/cmd/frostfs-cli/modules/object/get.go
+++ b/cmd/frostfs-cli/modules/object/get.go
@@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
filename := cmd.Flag(fileFlag).Value.String()
out, closer := createOutWriter(cmd, filename)
diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go
index 25df375d4..26243e7e7 100644
--- a/cmd/frostfs-cli/modules/object/hash.go
+++ b/cmd/frostfs-cli/modules/object/hash.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
@@ -41,9 +42,7 @@ func initObjectHashCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
- _ = objectHashCmd.MarkFlagRequired("range")
-
+ flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
}
@@ -52,7 +51,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
+ tz := typ == hashTz
+ fullHash := len(ranges) == 0
+ if fullHash {
+ var headPrm internalclient.HeadObjectPrm
+ headPrm.SetClient(cli)
+ Prepare(cmd, &headPrm)
+ headPrm.SetAddress(objAddr)
+
+ // get hash of the full payload through HEAD (maybe the user can do it through a dedicated command?)
+ res, err := internalclient.HeadObject(cmd.Context(), headPrm)
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ var cs checksum.Checksum
+ var csSet bool
+
+ if tz {
+ cs, csSet = res.Header().PayloadHomomorphicHash()
+ } else {
+ cs, csSet = res.Header().PayloadChecksum()
+ }
+
+ if csSet {
+ cmd.Println(hex.EncodeToString(cs.Value()))
+ } else {
+ cmd.Println("Missing checksum in object header.")
+ }
+
+ return
+ }
+
var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm)
@@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
hashPrm.SetSalt(salt)
hashPrm.SetRanges(ranges)
- if typ == hashTz {
+ if tz {
hashPrm.TZ()
}
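
Editor's note: with no `--range` given, the reverted command reads the checksum straight from the object header (via HEAD) instead of hashing payload ranges server-side. A minimal stdlib-only sketch of how a caller could cross-check a downloaded payload against that printed value, assuming the default "sha256" hash type; the checksum string here is a hypothetical placeholder:

```go
// Minimal sketch: verify a downloaded payload against the hex checksum
// printed from the object header by `object hash`.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte("object payload fetched with `frostfs-cli object get`")
	headerChecksum := "..." // hex value printed by `object hash` (placeholder)

	sum := sha256.Sum256(payload)
	if hex.EncodeToString(sum[:]) == headerChecksum {
		fmt.Println("payload matches header checksum")
	} else {
		fmt.Println("payload does NOT match header checksum")
	}
}
```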
diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go
index 97e996cad..14797dc41 100644
--- a/cmd/frostfs-cli/modules/object/head.go
+++ b/cmd/frostfs-cli/modules/object/head.go
@@ -6,12 +6,12 @@ import (
"fmt"
"os"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -38,6 +38,7 @@ func initObjectHeadCmd() {
_ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(fileFlag, "", "File to write header to. Default: stdout.")
+ flags.Bool("main-only", false, "Return only main fields")
flags.Bool(commonflags.JSON, false, "Marshal output in JSON")
flags.Bool("proto", false, "Marshal output in Protobuf")
flags.Bool(rawFlag, false, rawFlagDesc)
@@ -47,7 +48,8 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
+ mainOnly, _ := cmd.Flags().GetBool("main-only")
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -60,6 +62,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
raw, _ := cmd.Flags().GetBool(rawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(objAddr)
+ prm.SetMainOnlyFlag(mainOnly)
res, err := internalclient.HeadObject(cmd.Context(), prm)
if err != nil {
diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go
index d67db9f0d..d2e9af24c 100644
--- a/cmd/frostfs-cli/modules/object/lock.go
+++ b/cmd/frostfs-cli/modules/object/lock.go
@@ -7,18 +7,17 @@ import (
"strconv"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
- "github.com/spf13/viper"
)
// object lock command.
@@ -79,7 +78,7 @@ var objectLockCmd = &cobra.Command{
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
- endpoint := viper.GetString(commonflags.RPC)
+ endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err)
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 476238651..0eac4e6d2 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -1,6 +1,8 @@
package object
import (
+ "bytes"
+ "cmp"
"context"
"crypto/ecdsa"
"encoding/hex"
@@ -49,12 +51,6 @@ type ecHeader struct {
parent oid.ID
}
-type objectCounter struct {
- sync.Mutex
- total uint32
- isECcounted bool
-}
-
type objectPlacement struct {
requiredNodes []netmapSDK.NodeInfo
confirmedNodes []netmapSDK.NodeInfo
@@ -63,7 +59,6 @@ type objectPlacement struct {
type objectNodesResult struct {
errors []error
placements map[oid.ID]objectPlacement
- total uint32
}
type ObjNodesDataObject struct {
@@ -109,23 +104,23 @@ func initObjectNodesCmd() {
func objectNodes(cmd *cobra.Command, _ []string) {
var cnrID cid.ID
var objID oid.ID
- ReadObjectAddress(cmd, &cnrID, &objID)
+ readObjectAddress(cmd, &cnrID, &objID)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
- objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk)
+ objects := getPhyObjects(cmd, cnrID, objID, cli, pk)
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
- getActualPlacement(cmd, netmap, pk, objects, count, result)
+ getActualPlacement(cmd, netmap, pk, objects, result)
printPlacement(cmd, objID, objects, result)
}
-func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) {
+func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@@ -153,7 +148,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
parent: res.Header().ECHeader().Parent(),
}
}
- return []phyObject{obj}, 1
+ return []phyObject{obj}
}
var errSplitInfo *objectSDK.SplitInfoError
@@ -163,34 +158,29 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
- return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1
+ return getECObjectChunks(cmd, cnrID, objID, ecInfoError)
}
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
- return nil, 0
+ return nil
}
-func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) {
- members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
- return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total
+func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject {
+ members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
+ return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead)
}
-func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) {
- var total int
+func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
splitInfo := errSplitInfo.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
- if total = len(members); total > 0 {
- total-- // linking object is not data object
- }
- return members, total
+ return members
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
- return members, len(members)
+ return members
}
- members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
- return members, len(members)
+ return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
}
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
@@ -205,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
prmHead.SetRawFlag(true) // to get an error instead of whole object
eg, egCtx := errgroup.WithContext(cmd.Context())
- for idx := range members {
+ for idx := range len(members) {
partObjID := members[idx]
eg.Go(func() error {
@@ -333,7 +323,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
}
placementBuilder := placement.NewNetworkMapBuilder(netmap)
for _, object := range objects {
- placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
+ placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
for repIdx, rep := range placement {
numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
@@ -371,7 +361,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
placementObjectID = object.ecHeader.parent
}
placementBuilder := placement.NewNetworkMapBuilder(netmap)
- placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
+ placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
for _, vector := range placement {
@@ -396,11 +386,8 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
}
}
-func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) {
+func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) {
resultMtx := &sync.Mutex{}
- counter := &objectCounter{
- total: uint32(count),
- }
candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
@@ -417,7 +404,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
for _, object := range objects {
eg.Go(func() error {
- stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter)
+ stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
resultMtx.Lock()
defer resultMtx.Unlock()
if err == nil && stored {
@@ -436,7 +423,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
- result.total = counter.total
}
func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
@@ -461,11 +447,17 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
var cli *client.Client
var addresses []string
if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal {
- addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())
+ candidate.IterateNetworkEndpoints(func(s string) bool {
+ addresses = append(addresses, s)
+ return false
+ })
addresses = append(addresses, candidate.ExternalAddresses()...)
} else {
addresses = append(addresses, candidate.ExternalAddresses()...)
- addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())
+ candidate.IterateNetworkEndpoints(func(s string) bool {
+ addresses = append(addresses, s)
+ return false
+ })
}
var lastErr error
@@ -489,7 +481,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
return cli, nil
}
-func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) {
+func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@@ -504,14 +496,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
res, err := internalclient.HeadObject(ctx, prmHead)
if err == nil && res != nil {
- if res.Header().ECHeader() != nil {
- counter.Lock()
- defer counter.Unlock()
- if !counter.isECcounted {
- counter.total *= res.Header().ECHeader().Total()
- }
- counter.isECcounted = true
- }
return true, nil
}
var notFound *apistatus.ObjectNotFound
@@ -523,6 +507,7 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
}
func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
+ normalizeObjectNodesResult(objects, result)
if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
printObjectNodesAsJSON(cmd, objID, objects, result)
} else {
@@ -530,9 +515,36 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
}
}
+func normalizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
+ slices.SortFunc(objects, func(lhs, rhs phyObject) int {
+ if lhs.ecHeader == nil && rhs.ecHeader == nil {
+ return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
+ }
+ if lhs.ecHeader == nil {
+ return -1
+ }
+ if rhs.ecHeader == nil {
+ return 1
+ }
+ if lhs.ecHeader.parent == rhs.ecHeader.parent {
+ return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
+ }
+ return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
+ })
+ for _, obj := range objects {
+ op := result.placements[obj.objectID]
+ slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
+ return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
+ })
+ slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
+ return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
+ })
+ result.placements[obj.objectID] = op
+ }
+}
+
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
- fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total)
- fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
+ fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
for _, object := range objects {
fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
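
Editor's note: the new normalization step sorts physical objects with a multi-key comparator (plain objects first by ID, then EC chunks grouped by parent and ordered by chunk index) so the text and JSON output become deterministic. A self-contained sketch of the same ordering with toy types:

```go
// Multi-key ordering with slices.SortFunc: non-EC objects first (by ID),
// then EC chunks grouped by parent and ordered by chunk index.
package main

import (
	"bytes"
	"cmp"
	"fmt"
	"slices"
)

type ecHeader struct {
	parent [32]byte
	index  uint32
}

type phyObject struct {
	id [32]byte
	ec *ecHeader
}

func main() {
	objs := []phyObject{
		{id: [32]byte{3}, ec: &ecHeader{parent: [32]byte{9}, index: 1}},
		{id: [32]byte{2}},
		{id: [32]byte{4}, ec: &ecHeader{parent: [32]byte{9}, index: 0}},
		{id: [32]byte{1}},
	}
	slices.SortFunc(objs, func(lhs, rhs phyObject) int {
		if lhs.ec == nil && rhs.ec == nil {
			return bytes.Compare(lhs.id[:], rhs.id[:])
		}
		if lhs.ec == nil {
			return -1
		}
		if rhs.ec == nil {
			return 1
		}
		if lhs.ec.parent == rhs.ec.parent {
			return cmp.Compare(lhs.ec.index, rhs.ec.index)
		}
		return bytes.Compare(lhs.ec.parent[:], rhs.ec.parent[:])
	})
	for _, o := range objs {
		fmt.Println(o.id[0], o.ec != nil) // 1 2 (plain), then 4 3 (EC by index)
	}
}
```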
diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go
index ebbde76a2..8f03885ab 100644
--- a/cmd/frostfs-cli/modules/object/patch.go
+++ b/cmd/frostfs-cli/modules/object/patch.go
@@ -2,7 +2,6 @@ package object
import (
"fmt"
- "os"
"strconv"
"strings"
@@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -22,7 +20,6 @@ const (
replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range"
payloadFlagName = "payload"
- splitHeaderFlagName = "split-header"
)
var objectPatchCmd = &cobra.Command{
@@ -49,18 +46,17 @@ func initObjectPatchCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
+ flags.String(newAttrsFlagName, "", "New object attributes in the form of Key1=Value1,Key2=Value2")
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
- flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
}
func patch(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeSlice(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) {
prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs
- prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
-
for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i],
@@ -105,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
}
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
- rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
- if err != nil {
- return nil, err
+ var rawAttrs []string
+
+ raw := cmd.Flag(newAttrsFlagName).Value.String()
+ if len(raw) != 0 {
+ rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
@@ -153,22 +149,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v
}
-
-func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
- path, _ := cmd.Flags().GetString(splitHeaderFlagName)
- if path == "" {
- return nil
- }
-
- data, err := os.ReadFile(path)
- commonCmd.ExitOnErr(cmd, "read file error: %w", err)
-
- splitHdrV2 := new(objectV2.SplitHeader)
- err = splitHdrV2.Unmarshal(data)
- if err != nil {
- err = splitHdrV2.UnmarshalJSON(data)
- commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
- }
-
- return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
-}
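
Editor's note: here (and in put.go below) the revert swaps a StringSlice flag for a plain String split on commas. A sketch of that parsing, with toy types rather than objectSDK.Attribute; note the inherent limitation that attribute values containing ',' cannot be expressed this way, which is what the StringSlice variant addressed:

```go
// Comma-split Key=Value parsing as used after the revert.
package main

import (
	"fmt"
	"strings"
)

func parseAttrs(raw string) (map[string]string, error) {
	attrs := make(map[string]string)
	if raw == "" {
		return attrs, nil
	}
	for _, kv := range strings.Split(raw, ",") {
		k, v, found := strings.Cut(kv, "=")
		if !found || k == "" {
			return nil, fmt.Errorf("invalid attribute %q, expected Key=Value", kv)
		}
		attrs[k] = v
	}
	return attrs, nil
}

func main() {
	attrs, err := parseAttrs("FileName=report.txt,Author=alice")
	fmt.Println(attrs, err)
}
```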
diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go
index 9e8a7cc6f..45e02edb3 100644
--- a/cmd/frostfs-cli/modules/object/put.go
+++ b/cmd/frostfs-cli/modules/object/put.go
@@ -10,11 +10,11 @@ import (
"strings"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -50,7 +50,7 @@ func initObjectPutCmd() {
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
+ flags.String("attributes", "", "User attributes in the form of Key1=Value1,Key2=Value2")
flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
}
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
- rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
- if err != nil {
- return nil, err
+ var rawAttrs []string
+
+ raw := cmd.Flag("attributes").Value.String()
+ if len(raw) != 0 {
+ rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go
index 6ec508ae2..ad4bc3d59 100644
--- a/cmd/frostfs-cli/modules/object/range.go
+++ b/cmd/frostfs-cli/modules/object/range.go
@@ -38,7 +38,7 @@ func initObjectRangeCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
+ flags.String("range", "", "Range to take data from in the form offset:length")
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc)
}
@@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool {
if ok {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
- if !toJSON && !toProto {
+ if !(toJSON || toProto) {
cmd.PrintErrln("Object is erasure-encoded, ec information received.")
}
printECInfo(cmd, errECInfo.ECInfo())
@@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
}
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
- vs, err := cmd.Flags().GetStringSlice("range")
- if len(vs) == 0 || err != nil {
- return nil, err
+ v := cmd.Flag("range").Value.String()
+ if len(v) == 0 {
+ return nil, nil
}
+ vs := strings.Split(v, ",")
rs := make([]objectSDK.Range, len(vs))
for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep)
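
Editor's note: after the revert, ranges arrive as a single comma-separated string of offset:length pairs. A sketch of the full parse, assuming rangeSep is ":" (as the strings.Cut call above suggests); the Range type here is a stand-in for objectSDK.Range:

```go
// Parse "offset:length" pairs from a comma-separated flag value.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type Range struct{ Offset, Length uint64 }

func parseRanges(v string) ([]Range, error) {
	if v == "" {
		return nil, nil
	}
	parts := strings.Split(v, ",")
	rs := make([]Range, len(parts))
	for i, p := range parts {
		before, after, found := strings.Cut(p, ":")
		if !found {
			return nil, fmt.Errorf("invalid range %q, expected offset:length", p)
		}
		off, err := strconv.ParseUint(before, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid offset in %q: %w", p, err)
		}
		ln, err := strconv.ParseUint(after, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid length in %q: %w", p, err)
		}
		rs[i] = Range{Offset: off, Length: ln}
	}
	return rs, nil
}

func main() {
	fmt.Println(parseRanges("0:128,256:64"))
}
```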
diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go
index 8e4e8b287..b090c9f8c 100644
--- a/cmd/frostfs-cli/modules/object/util.go
+++ b/cmd/frostfs-cli/modules/object/util.go
@@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
return xs
}
-func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
+func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readOID(cmd, obj)
@@ -262,8 +262,13 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client
if _, ok := dst.(*internal.DeleteObjectPrm); ok {
common.PrintVerbose(cmd, "Collecting relatives of the removal object...")
- objs = collectObjectRelatives(cmd, cli, cnr, *obj)
- objs = append(objs, *obj)
+ rels := collectObjectRelatives(cmd, cli, cnr, *obj)
+
+ if len(rels) == 0 {
+ objs = []oid.ID{*obj}
+ } else {
+ objs = append(rels, *obj)
+ }
}
}
diff --git a/cmd/frostfs-cli/modules/root.go b/cmd/frostfs-cli/modules/root.go
index 88acab341..21c367d29 100644
--- a/cmd/frostfs-cli/modules/root.go
+++ b/cmd/frostfs-cli/modules/root.go
@@ -21,6 +21,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc"
+ "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -111,16 +112,14 @@ func initConfig() {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
- // Find config directory.
- configDir, err := os.UserConfigDir()
- if err != nil {
- common.PrintVerbose(rootCmd, "Get config dir: %s", err)
- } else {
- // Search config in `$XDG_CONFIG_HOME/frostfs-cli/` with name "config.yaml"
- viper.AddConfigPath(filepath.Join(configDir, "frostfs-cli"))
- viper.SetConfigName("config")
- viper.SetConfigType("yaml")
- }
+ // Find home directory.
+ home, err := homedir.Dir()
+ commonCmd.ExitOnErr(rootCmd, "", err)
+
+ // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml"
+ viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
+ viper.SetConfigName("config")
+ viper.SetConfigType("yaml")
}
viper.SetEnvPrefix(envPrefix)
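
Editor's note: the revert makes the CLI look for `$HOME/.config/frostfs-cli/config.yaml` via go-homedir instead of `os.UserConfigDir()`. A minimal standalone sketch of that lookup:

```go
// Config lookup as in the reverted initConfig: search
// $HOME/.config/frostfs-cli/config.yaml via go-homedir and viper.
package main

import (
	"fmt"
	"path/filepath"

	"github.com/mitchellh/go-homedir"
	"github.com/spf13/viper"
)

func main() {
	home, err := homedir.Dir()
	if err != nil {
		panic(err)
	}

	viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
	viper.SetConfigName("config")
	viper.SetConfigType("yaml")

	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("using config:", viper.ConfigFileUsed())
	}
}
```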
diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go
index e2c05d486..019feb0ec 100644
--- a/cmd/frostfs-cli/modules/tree/add.go
+++ b/cmd/frostfs-cli/modules/tree/add.go
@@ -30,6 +30,8 @@ func initAddCmd() {
ff := addCmd.Flags()
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
ff.Uint64(parentIDFlagKey, 0, "Parent node ID")
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func add(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go
index 7263bcd0d..5d5b00b7d 100644
--- a/cmd/frostfs-cli/modules/tree/add_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/add_by_path.go
@@ -36,6 +36,7 @@ func initAddByPathCmd() {
ff.String(pathFlagKey, "", "Path to a node")
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
_ = cobra.MarkFlagRequired(ff, pathFlagKey)
}
diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go
index d71a94b98..4e0099f02 100644
--- a/cmd/frostfs-cli/modules/tree/client.go
+++ b/cmd/frostfs-cli/modules/tree/client.go
@@ -2,19 +2,17 @@ package tree
import (
"context"
- "crypto/tls"
- "fmt"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -22,40 +20,27 @@ import (
// after making Tree API public.
func _client() (tree.TreeServiceClient, error) {
var netAddr network.Address
-
- rpcEndpoint := viper.GetString(commonflags.RPC)
- if rpcEndpoint == "" {
- return nil, fmt.Errorf("%s is not defined", commonflags.RPC)
- }
-
- err := netAddr.FromString(rpcEndpoint)
+ err := netAddr.FromString(viper.GetString(commonflags.RPC))
if err != nil {
return nil, err
}
- host, isTLS, err := client.ParseURI(netAddr.URIAddr())
- if err != nil {
- return nil, err
- }
-
- creds := insecure.NewCredentials()
- if isTLS {
- creds = credentials.NewTLS(&tls.Config{})
- }
-
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
- tracing.NewUnaryClientInterceptor(),
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- grpc.WithDisableServiceConfig(),
- grpc.WithTransportCredentials(creds),
}
- cc, err := grpc.NewClient(host, opts...)
+ if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
return tree.NewTreeServiceClient(cc), err
}
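
Editor's note: the reverted client only adds insecure transport credentials when the endpoint is not `grpcs:`; the removed branch shows the TLS side. A hedged sketch combining both branches, with the scheme check (`strings.HasPrefix(addr, "grpcs:")`) assumed to happen in the caller:

```go
// Transport-credential selection around this revert: insecure for plain
// grpc://, TLS (system root CAs) for grpcs://. grpc.NewClient requires
// transport credentials either way.
package main

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// dialTree connects to a tree endpoint; host is "address:port" without
// a URI scheme.
func dialTree(host string, useTLS bool) (*grpc.ClientConn, error) {
	creds := insecure.NewCredentials()
	if useTLS {
		creds = credentials.NewTLS(&tls.Config{})
	}
	return grpc.NewClient(host, grpc.WithTransportCredentials(creds))
}

func main() {
	cc, err := dialTree("127.0.0.1:8080", false)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}
```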
diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go
index 210630e60..7061723fd 100644
--- a/cmd/frostfs-cli/modules/tree/get_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/get_by_path.go
@@ -36,6 +36,8 @@ func initGetByPathCmd() {
ff.String(pathFlagKey, "", "Path to a node")
ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node")
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getByPath(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go
index 9d767ab3e..376aa8e8d 100644
--- a/cmd/frostfs-cli/modules/tree/get_op_log.go
+++ b/cmd/frostfs-cli/modules/tree/get_op_log.go
@@ -30,6 +30,8 @@ func initGetOpLogCmd() {
ff := getOpLogCmd.Flags()
ff.Uint64(heightFlagKey, 0, "Height to start with")
ff.Uint64(countFlagKey, 10, "Logged operations count")
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getOpLog(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go
index c581b8e26..b01bb2e77 100644
--- a/cmd/frostfs-cli/modules/tree/healthcheck.go
+++ b/cmd/frostfs-cli/modules/tree/healthcheck.go
@@ -20,6 +20,8 @@ var healthcheckCmd = &cobra.Command{
func initHealthcheckCmd() {
commonflags.Init(healthcheckCmd)
+ ff := healthcheckCmd.Flags()
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func healthcheck(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go
index ee1db2a79..f8c0e490f 100644
--- a/cmd/frostfs-cli/modules/tree/list.go
+++ b/cmd/frostfs-cli/modules/tree/list.go
@@ -26,6 +26,8 @@ func initListCmd() {
ff := listCmd.Flags()
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = listCmd.MarkFlagRequired(commonflags.CIDFlag)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func list(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go
index 7a369bd02..dc807d752 100644
--- a/cmd/frostfs-cli/modules/tree/move.go
+++ b/cmd/frostfs-cli/modules/tree/move.go
@@ -33,6 +33,8 @@ func initMoveCmd() {
_ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
_ = getSubtreeCmd.MarkFlagRequired(parentIDFlagKey)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func move(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go
index 3c532fe26..d0b6fab2f 100644
--- a/cmd/frostfs-cli/modules/tree/remove.go
+++ b/cmd/frostfs-cli/modules/tree/remove.go
@@ -29,6 +29,8 @@ func initRemoveCmd() {
ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
_ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func remove(cmd *cobra.Command, _ []string) {
diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go
index c5f7ad401..83a8909b6 100644
--- a/cmd/frostfs-cli/modules/tree/subtree.go
+++ b/cmd/frostfs-cli/modules/tree/subtree.go
@@ -34,6 +34,8 @@ func initGetSubtreeCmd() {
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getSubTree(cmd *cobra.Command, _ []string) {
diff --git a/pkg/util/ape/parser.go b/cmd/frostfs-cli/modules/util/ape.go
similarity index 87%
rename from pkg/util/ape/parser.go
rename to cmd/frostfs-cli/modules/util/ape.go
index 6f114d45b..73c368510 100644
--- a/pkg/util/ape/parser.go
+++ b/cmd/frostfs-cli/modules/util/ape.go
@@ -1,14 +1,16 @@
-package ape
+package util
import (
"errors"
"fmt"
"os"
+ "strconv"
"strings"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/flynn-archive/go-shlex"
+ "github.com/spf13/cobra"
)
var (
@@ -25,6 +27,38 @@ var (
errFailedToParseAllAny = errors.New("any/all is not parsed")
)
+// PrintHumanReadableAPEChain prints APE chain rules.
+func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) {
+ cmd.Println("Chain ID: " + string(chain.ID))
+ cmd.Printf(" HEX: %x\n", chain.ID)
+ cmd.Println("Rules:")
+ for _, rule := range chain.Rules {
+ cmd.Println("\n\tStatus: " + rule.Status.String())
+ cmd.Println("\tAny: " + strconv.FormatBool(rule.Any))
+ cmd.Println("\tConditions:")
+ for _, c := range rule.Condition {
+ var ot string
+ switch c.Kind {
+ case apechain.KindResource:
+ ot = "Resource"
+ case apechain.KindRequest:
+ ot = "Request"
+ default:
+ panic("unknown object type")
+ }
+ cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value))
+ }
+ cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted))
+ for _, name := range rule.Actions.Names {
+ cmd.Println("\t\t" + name)
+ }
+ cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted))
+ for _, name := range rule.Resources.Names {
+ cmd.Println("\t\t" + name)
+ }
+ }
+}
+
func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error {
data, err := os.ReadFile(path)
if err != nil {
@@ -174,11 +208,11 @@ func parseStatus(lexeme string) (apechain.Status, error) {
case "deny":
if !found {
return apechain.AccessDenied, nil
- }
- if strings.EqualFold(expression, "QuotaLimitReached") {
+ } else if strings.EqualFold(expression, "QuotaLimitReached") {
return apechain.QuotaLimitReached, nil
+ } else {
+ return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
}
- return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
case "allow":
if found {
return 0, errUnknownStatusDetail
@@ -261,7 +295,7 @@ func parseResource(lexeme string, isObj bool) (string, error) {
} else {
if lexeme == "*" {
return nativeschema.ResourceFormatAllContainers, nil
- } else if lexeme == "/*" || lexeme == "root/*" {
+ } else if lexeme == "/*" {
return nativeschema.ResourceFormatRootContainers, nil
} else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 {
lexeme = lexeme[1:]
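
Editor's note: the status hunk above restructures handling of lexemes that carry a detail after a separator (e.g. "deny:QuotaLimitReached"). A toy sketch of that logic, assuming the separator is ":" and using plain strings in place of apechain.Status:

```go
// Status-lexeme parsing: "deny" and "deny:QuotaLimitReached" are valid,
// "allow" must carry no detail.
package main

import (
	"errors"
	"fmt"
	"strings"
)

func parseStatus(lexeme string) (string, error) {
	name, detail, found := strings.Cut(lexeme, ":")
	switch strings.ToLower(name) {
	case "deny":
		if !found {
			return "AccessDenied", nil
		} else if strings.EqualFold(detail, "QuotaLimitReached") {
			return "QuotaLimitReached", nil
		} else {
			return "", fmt.Errorf("unknown status detail: %s", detail)
		}
	case "allow":
		if found {
			return "", errors.New("unknown status detail")
		}
		return "Allow", nil
	}
	return "", fmt.Errorf("unknown status: %s", name)
}

func main() {
	fmt.Println(parseStatus("deny:QuotaLimitReached"))
	fmt.Println(parseStatus("allow"))
}
```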
diff --git a/pkg/util/ape/parser_test.go b/cmd/frostfs-cli/modules/util/ape_test.go
similarity index 96%
rename from pkg/util/ape/parser_test.go
rename to cmd/frostfs-cli/modules/util/ape_test.go
index c236c4603..b275803df 100644
--- a/pkg/util/ape/parser_test.go
+++ b/cmd/frostfs-cli/modules/util/ape_test.go
@@ -1,4 +1,4 @@
-package ape
+package util
import (
"fmt"
@@ -43,15 +43,6 @@ func TestParseAPERule(t *testing.T) {
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
},
},
- {
- name: "Valid rule for all containers in explicit root namespace",
- rule: "allow Container.Put root/*",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}},
- },
- },
{
name: "Valid rule for all objects in root namespace and container",
rule: "allow Object.Put /cid/*",
diff --git a/cmd/frostfs-cli/modules/util/convert_eacl.go b/cmd/frostfs-cli/modules/util/convert_eacl.go
index caa6dfcfe..d588ba35d 100644
--- a/cmd/frostfs-cli/modules/util/convert_eacl.go
+++ b/cmd/frostfs-cli/modules/util/convert_eacl.go
@@ -6,7 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
+ apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 13a747ba6..4eaac845c 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -1,17 +1,13 @@
package main
import (
- "context"
"os"
"os/signal"
- "strconv"
"syscall"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/spf13/cast"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -41,36 +37,14 @@ func reloadConfig() error {
}
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
audit.Store(cfg.GetBool("audit.enabled"))
- var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err
}
- err = logPrm.SetTags(loggerTags())
- if err != nil {
- return err
- }
- logger.UpdateLevelForTags(logPrm)
-
- return nil
+ return logPrm.Reload()
}
-func loggerTags() [][]string {
- var res [][]string
- for i := 0; ; i++ {
- var item []string
- index := strconv.FormatInt(int64(i), 10)
- names := cast.ToString(cfg.Get("logger.tags." + index + ".names"))
- if names == "" {
- break
- }
- item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level")))
- res = append(res, item)
- }
- return res
-}
-
-func watchForSignal(ctx context.Context, cancel func()) {
+func watchForSignal(cancel func()) {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
@@ -82,49 +56,49 @@ func watchForSignal(ctx context.Context, cancel func()) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
- log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
- shutdown(ctx)
- log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ shutdown()
+ log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
- log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+ log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
- shutdown(ctx)
+ shutdown()
return
default:
// block until any signal is received
select {
case <-ch:
- log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
- shutdown(ctx)
- log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ shutdown()
+ log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
- log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+ log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
- shutdown(ctx)
+ shutdown()
return
case <-sighupCh:
- log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
- if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
- log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
+ log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+ if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
+ log.Info(logs.FrostFSNodeSIGHUPSkip)
break
}
err := reloadConfig()
if err != nil {
- log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
- pprofCmp.reload(ctx)
- metricsCmp.reload(ctx)
- log.Info(ctx, logs.FrostFSIRReloadExtraWallets)
+ pprofCmp.reload()
+ metricsCmp.reload()
+ log.Info(logs.FrostFSIRReloadExtraWallets)
err = innerRing.SetExtraWallets(cfg)
if err != nil {
- log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
- innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
- log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
+ log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
}
}
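
Editor's note: watchForSignal uses a two-level select so that termination signals and internal errors take priority over SIGHUP reconfiguration when both are pending. A compact standalone sketch of that pattern:

```go
// Prioritized select: shutdown signals win over SIGHUP reloads.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	term := make(chan os.Signal, 1)
	hup := make(chan os.Signal, 1)
	signal.Notify(term, syscall.SIGINT, syscall.SIGTERM)
	signal.Notify(hup, syscall.SIGHUP)

	for {
		select {
		case <-term: // priority branch: checked first
			fmt.Println("terminating")
			return
		default:
			// block until any signal is received
			select {
			case <-term:
				fmt.Println("terminating")
				return
			case <-hup:
				fmt.Println("reloading configuration")
			}
		}
	}
}
```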
diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go
index 9b775252f..e703301ae 100644
--- a/cmd/frostfs-ir/defaults.go
+++ b/cmd/frostfs-ir/defaults.go
@@ -9,7 +9,6 @@ import (
func defaultConfiguration(cfg *viper.Viper) {
cfg.SetDefault("logger.level", "info")
cfg.SetDefault("logger.destination", "stdout")
- cfg.SetDefault("logger.timestamp", false)
setPprofDefaults(cfg)
@@ -48,8 +47,6 @@ func defaultConfiguration(cfg *viper.Viper) {
cfg.SetDefault("node.kludge_compatibility_mode", false)
cfg.SetDefault("audit.enabled", false)
-
- setMultinetDefaults(cfg)
}
func setControlDefaults(cfg *viper.Viper) {
@@ -133,11 +130,3 @@ func setMorphDefaults(cfg *viper.Viper) {
cfg.SetDefault("morph.validators", []string{})
cfg.SetDefault("morph.switch_interval", 2*time.Minute)
}
-
-func setMultinetDefaults(cfg *viper.Viper) {
- cfg.SetDefault("multinet.enabled", false)
- cfg.SetDefault("multinet.balancer", "")
- cfg.SetDefault("multinet.restrict", false)
- cfg.SetDefault("multinet.fallback_delay", "0s")
- cfg.SetDefault("multinet.subnets", "")
-}
diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go
index dd70fc91c..2792c3548 100644
--- a/cmd/frostfs-ir/httpcomponent.go
+++ b/cmd/frostfs-ir/httpcomponent.go
@@ -1,7 +1,6 @@
package main
import (
- "context"
"net/http"
"time"
@@ -25,8 +24,8 @@ const (
shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)
-func (c *httpComponent) init(ctx context.Context) {
- log.Info(ctx, "init "+c.name)
+func (c *httpComponent) init() {
+ log.Info("init " + c.name)
c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
c.address = cfg.GetString(c.name + addressKeyPostfix)
c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@@ -40,14 +39,14 @@ func (c *httpComponent) init(ctx context.Context) {
httputil.WithShutdownTimeout(c.shutdownDur),
)
} else {
- log.Info(ctx, c.name+" is disabled, skip")
+ log.Info(c.name + " is disabled, skip")
c.srv = nil
}
}
-func (c *httpComponent) start(ctx context.Context) {
+func (c *httpComponent) start() {
if c.srv != nil {
- log.Info(ctx, "start "+c.name)
+ log.Info("start " + c.name)
wg.Add(1)
go func() {
defer wg.Done()
@@ -56,10 +55,10 @@ func (c *httpComponent) start(ctx context.Context) {
}
}
-func (c *httpComponent) shutdown(ctx context.Context) error {
+func (c *httpComponent) shutdown() error {
if c.srv != nil {
- log.Info(ctx, "shutdown "+c.name)
- return c.srv.Shutdown(ctx)
+ log.Info("shutdown " + c.name)
+ return c.srv.Shutdown()
}
return nil
}
@@ -71,17 +70,17 @@ func (c *httpComponent) needReload() bool {
return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}
-func (c *httpComponent) reload(ctx context.Context) {
- log.Info(ctx, "reload "+c.name)
+func (c *httpComponent) reload() {
+ log.Info("reload " + c.name)
if c.needReload() {
- log.Info(ctx, c.name+" config updated")
- if err := c.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err),
+ log.Info(c.name + " config updated")
+ if err := c.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()),
)
} else {
- c.init(ctx)
- c.start(ctx)
+ c.init()
+ c.start()
}
}
}
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 799feb784..02936ae78 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -31,6 +31,7 @@ const (
var (
wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors
+ logPrm = new(logger.Prm)
innerRing *innerring.Server
pprofCmp *pprofComponent
metricsCmp *httpComponent
@@ -69,7 +70,6 @@ func main() {
metrics := irMetrics.NewInnerRingMetrics()
- var logPrm logger.Prm
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)
@@ -79,60 +79,54 @@ func main() {
)
exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
- logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
- err = logPrm.SetTags(loggerTags())
- exitErr(err)
-
log, err = logger.NewLogger(logPrm)
exitErr(err)
- logger.UpdateLevelForTags(logPrm)
-
ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent()
- pprofCmp.init(ctx)
+ pprofCmp.init()
metricsCmp = newMetricsComponent()
- metricsCmp.init(ctx)
+ metricsCmp.init()
audit.Store(cfg.GetBool("audit.enabled"))
innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit)
exitErr(err)
- pprofCmp.start(ctx)
- metricsCmp.start(ctx)
+ pprofCmp.start()
+ metricsCmp.start()
// start inner ring
err = innerRing.Start(ctx, intErr)
exitErr(err)
- log.Info(ctx, logs.CommonApplicationStarted,
+ log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
- watchForSignal(ctx, cancel)
+ watchForSignal(cancel)
<-ctx.Done() // graceful shutdown
- log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
+ log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
wg.Wait()
- log.Info(ctx, logs.FrostFSIRApplicationStopped)
+ log.Info(logs.FrostFSIRApplicationStopped)
}
-func shutdown(ctx context.Context) {
- innerRing.Stop(ctx)
- if err := metricsCmp.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err),
+func shutdown() {
+ innerRing.Stop()
+ if err := metricsCmp.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()),
)
}
- if err := pprofCmp.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err),
+ if err := pprofCmp.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()),
)
}
if err := sdnotify.ClearStatus(); err != nil {
- log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
+ log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go
index 2aebcde7f..ff5642008 100644
--- a/cmd/frostfs-ir/pprof.go
+++ b/cmd/frostfs-ir/pprof.go
@@ -1,7 +1,6 @@
package main
import (
- "context"
"runtime"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -29,8 +28,8 @@ func newPprofComponent() *pprofComponent {
}
}
-func (c *pprofComponent) init(ctx context.Context) {
- c.httpComponent.init(ctx)
+func (c *pprofComponent) init() {
+ c.httpComponent.init()
if c.enabled {
c.blockRate = cfg.GetInt(pprofBlockRateKey)
@@ -52,17 +51,17 @@ func (c *pprofComponent) needReload() bool {
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}
-func (c *pprofComponent) reload(ctx context.Context) {
- log.Info(ctx, "reload "+c.name)
+func (c *pprofComponent) reload() {
+ log.Info("reload " + c.name)
if c.needReload() {
- log.Info(ctx, c.name+" config updated")
- if err := c.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err))
+ log.Info(c.name + " config updated")
+ if err := c.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()))
return
}
- c.init(ctx)
- c.start(ctx)
+ c.init()
+ c.start()
}
}
diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
index e7e2c0769..b1a6e3fd2 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
@@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
blz := openBlobovnicza(cmd)
- defer blz.Close(cmd.Context())
+ defer blz.Close()
var prm blobovnicza.GetPrm
prm.SetAddress(addr)
diff --git a/cmd/frostfs-lens/internal/blobovnicza/list.go b/cmd/frostfs-lens/internal/blobovnicza/list.go
index d41a15bcf..d327dbc41 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/list.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/list.go
@@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
}
blz := openBlobovnicza(cmd)
- defer blz.Close(cmd.Context())
+ defer blz.Close()
err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))
diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go
index 2819981d6..9d8ef3dad 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/root.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/root.go
@@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
blobovnicza.WithPath(vPath),
blobovnicza.WithReadOnly(true),
)
- common.ExitOnErr(cmd, blz.Open(cmd.Context()))
+ common.ExitOnErr(cmd, blz.Open())
return blz
}
diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go
index 4aa281616..eb4a5ff59 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/tui.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -42,7 +43,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := tui.OpenDB(vPath, false)
+ db, err := openDB(false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -66,3 +67,13 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
+
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
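
Editor's note: both TUI commands (here and in meta/tui.go below) reintroduce a local openDB helper around bbolt.Open with ReadOnly set. A short usage sketch of the read-only open plus a read transaction; the path is hypothetical:

```go
// Open a bbolt database read-only and list its top-level buckets.
package main

import (
	"fmt"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("/path/to/blobovnicza.db", 0o600, &bbolt.Options{
		ReadOnly: true,
	})
	if err != nil {
		panic(err)
	}
	defer db.Close()

	_ = db.View(func(tx *bbolt.Tx) error {
		return tx.ForEach(func(name []byte, _ *bbolt.Bucket) error {
			fmt.Printf("bucket: %x\n", name)
			return nil
		})
	})
}
```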
diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go
index f436343c7..9eb60f966 100644
--- a/cmd/frostfs-lens/internal/meta/inspect.go
+++ b/cmd/frostfs-lens/internal/meta/inspect.go
@@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
db := openMeta(cmd)
- defer db.Close(cmd.Context())
+ defer db.Close()
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
diff --git a/cmd/frostfs-lens/internal/meta/list-garbage.go b/cmd/frostfs-lens/internal/meta/list-garbage.go
index 6b27a232f..61b10ca1f 100644
--- a/cmd/frostfs-lens/internal/meta/list-garbage.go
+++ b/cmd/frostfs-lens/internal/meta/list-garbage.go
@@ -19,7 +19,7 @@ func init() {
func listGarbageFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
- defer db.Close(cmd.Context())
+ defer db.Close()
var garbPrm meta.GarbageIterationPrm
garbPrm.SetHandler(
diff --git a/cmd/frostfs-lens/internal/meta/list-graveyard.go b/cmd/frostfs-lens/internal/meta/list-graveyard.go
index 45642e74b..19a93691c 100644
--- a/cmd/frostfs-lens/internal/meta/list-graveyard.go
+++ b/cmd/frostfs-lens/internal/meta/list-graveyard.go
@@ -19,7 +19,7 @@ func init() {
func listGraveyardFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
- defer db.Close(cmd.Context())
+ defer db.Close()
var gravePrm meta.GraveyardIterationPrm
gravePrm.SetHandler(
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
index 7b0e25f3d..00e8bf117 100644
--- a/cmd/frostfs-lens/internal/meta/tui.go
+++ b/cmd/frostfs-lens/internal/meta/tui.go
@@ -2,12 +2,9 @@ package meta
import (
"context"
- "encoding/binary"
- "errors"
"fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
- schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
@@ -31,11 +28,6 @@ Available search filters:
var initialPrompt string
-var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
- 2: schema.MetabaseParserV2,
- 3: schema.MetabaseParserV3,
-}
-
func init() {
common.AddComponentPathFlag(tuiCMD, &vPath)
@@ -52,28 +44,18 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := tui.OpenDB(vPath, false)
+ db, err := openDB(false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
defer db.Close()
- schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
- if !hasVersion {
- return errors.New("couldn't detect schema version")
- }
-
- metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
- if !ok {
- return fmt.Errorf("unknown schema version %d", schemaVersion)
- }
-
// Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
app := tview.NewApplication()
- ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
+ ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
@@ -89,30 +71,12 @@ func runTUI(cmd *cobra.Command) error {
return app.Run()
}
-var (
- shardInfoBucket = []byte{5}
- versionRecord = []byte("version")
-)
-
-func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
- err := db.View(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(shardInfoBucket)
- if bkt == nil {
- return nil
- }
- rec := bkt.Get(versionRecord)
- if rec == nil {
- return nil
- }
-
- version = binary.LittleEndian.Uint64(rec)
- ok = true
-
- return nil
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
})
if err != nil {
- common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
+ return nil, err
}
-
- return
+ return db, nil
}
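
The deleted lookupSchemaVersion documents the on-disk convention this hunk abandons: the schema version lives as an 8-byte little-endian record under the "version" key of the shard-info bucket (single-byte name {5}). A self-contained round-trip of that encoding, using a throwaway database file:

package main

import (
	"encoding/binary"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

var (
	shardInfoBucket = []byte{5}
	versionRecord   = []byte("version")
)

func main() {
	db, err := bbolt.Open("/tmp/meta-version.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write version 3 the way the metabase stores it.
	err = db.Update(func(tx *bbolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists(shardInfoBucket)
		if err != nil {
			return err
		}
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, 3)
		return bkt.Put(versionRecord, buf)
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read it back, mirroring the removed lookupSchemaVersion.
	err = db.View(func(tx *bbolt.Tx) error {
		rec := tx.Bucket(shardInfoBucket).Get(versionRecord)
		fmt.Println("schema version:", binary.LittleEndian.Uint64(rec))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}

With the version lookup gone, the TUI always applies the single MetabaseParser rather than choosing between the V2 and V3 layouts.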
diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go
index 55051554c..0990e24c3 100644
--- a/cmd/frostfs-lens/internal/schema/common/raw.go
+++ b/cmd/frostfs-lens/internal/schema/common/raw.go
@@ -7,8 +7,6 @@ import (
)
type RawEntry struct {
- // key and value used for record dump.
- // nolint:unused
key, value []byte
}
diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go
index 077a68785..9bad19032 100644
--- a/cmd/frostfs-lens/internal/schema/common/schema.go
+++ b/cmd/frostfs-lens/internal/schema/common/schema.go
@@ -3,8 +3,6 @@ package common
import (
"errors"
"fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
type FilterResult byte
@@ -73,7 +71,11 @@ func (fp FallbackParser) ToParser() Parser {
func (p Parser) ToFallbackParser() FallbackParser {
return func(key, value []byte) (SchemaEntry, Parser) {
entry, next, err := p(key, value)
- assert.NoError(err, "couldn't use that parser as a fallback parser")
+ if err != nil {
+ panic(fmt.Errorf(
+ "couldn't use that parser as a fallback parser, it returned an error: %w", err,
+ ))
+ }
return entry, next
}
}
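
This hunk inlines the removed assert.NoError helper as an explicit panic. A stripped-down model of the Parser/FallbackParser pair shows why the panic is the only escape hatch; the types here are simplified, as the real Parser also returns a SchemaEntry and a next Parser:

package main

import "fmt"

// Simplified stand-ins for the schema/common types: a plain string is
// enough to show the shape of the conversion.
type (
	Parser         func(key, value []byte) (string, error)
	FallbackParser func(key, value []byte) string
)

// ToFallbackParser mirrors the change above: a fallback parser has no
// error channel, so an error from the wrapped parser can only panic.
func ToFallbackParser(p Parser) FallbackParser {
	return func(key, value []byte) string {
		entry, err := p(key, value)
		if err != nil {
			panic(fmt.Errorf("couldn't use that parser as a fallback parser, it returned an error: %w", err))
		}
		return entry
	}
}

func main() {
	raw := Parser(func(key, value []byte) (string, error) {
		return fmt.Sprintf("raw[%x:%x]", key, value), nil // never fails
	})
	fb := ToFallbackParser(raw)
	fmt.Println(fb([]byte{1}, []byte{2}))
}

Since FallbackParser cannot report errors, it should only wrap parsers that cannot fail, such as the raw dump parser used with WithFallback.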
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
index 4e6bbf08a..24cc0e52d 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
@@ -80,15 +80,10 @@ var (
},
)
- UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
+ UserAttributeParser = NewUserAttributeKeyBucketParser(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
)
- UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
- NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
- []string{"FilePath", "S3-Access-Box-CRDT-Name"},
- )
-
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: StrictResolver,
@@ -113,14 +108,4 @@ var (
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
-
- ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
- cidResolver: LenientResolver,
- oidResolver: LenientResolver,
- })
-
- ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
index 42a24c594..2fb122940 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
@@ -22,31 +22,27 @@ const (
Split
ContainerCounters
ECInfo
- ExpirationEpochToObject
- ObjectToExpirationEpoch
)
var x = map[Prefix]string{
- Graveyard: "Graveyard",
- Garbage: "Garbage",
- ToMoveIt: "To Move It",
- ContainerVolume: "Container Volume",
- Locked: "Locked",
- ShardInfo: "Shard Info",
- Primary: "Primary",
- Lockers: "Lockers",
- Tombstone: "Tombstone",
- Small: "Small",
- Root: "Root",
- Owner: "Owner",
- UserAttribute: "User Attribute",
- PayloadHash: "Payload Hash",
- Parent: "Parent",
- Split: "Split",
- ContainerCounters: "Container Counters",
- ECInfo: "EC Info",
- ExpirationEpochToObject: "Exp. Epoch to Object",
- ObjectToExpirationEpoch: "Object to Exp. Epoch",
+ Graveyard: "Graveyard",
+ Garbage: "Garbage",
+ ToMoveIt: "To Move It",
+ ContainerVolume: "Container Volume",
+ Locked: "Locked",
+ ShardInfo: "Shard Info",
+ Primary: "Primary",
+ Lockers: "Lockers",
+ Tombstone: "Tombstone",
+ Small: "Small",
+ Root: "Root",
+ Owner: "Owner",
+ UserAttribute: "User Attribute",
+ PayloadHash: "Payload Hash",
+ Parent: "Parent",
+ Split: "Split",
+ ContainerCounters: "Container Counters",
+ ECInfo: "EC Info",
}
func (p Prefix) String() string {
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
index 62d126f88..db90bddbd 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
@@ -9,7 +9,7 @@ import (
func (b *PrefixBucket) String() string {
return common.FormatSimple(
- fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
)
}
@@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
return fmt.Sprintf(
"%s CID %s",
common.FormatSimple(
- fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(b.id.String(), tcell.ColorAqua),
)
@@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
func (b *UserAttributeKeyBucket) String() string {
return fmt.Sprintf("%s CID %s ATTR-KEY %s",
common.FormatSimple(
- fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
index 7355c3d9e..82b47dd85 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
@@ -2,7 +2,6 @@ package buckets
import (
"errors"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -58,11 +57,10 @@ var (
)
var (
- ErrNotBucket = errors.New("not a bucket")
- ErrInvalidKeyLength = errors.New("invalid key length")
- ErrInvalidValueLength = errors.New("invalid value length")
- ErrInvalidPrefix = errors.New("invalid prefix")
- ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
+ ErrNotBucket = errors.New("not a bucket")
+ ErrInvalidKeyLength = errors.New("invalid key length")
+ ErrInvalidValueLength = errors.New("invalid value length")
+ ErrInvalidPrefix = errors.New("invalid prefix")
)
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
}
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
- return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
-}
-
-func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if value != nil {
return nil, nil, ErrNotBucket
@@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
return nil, nil, err
}
b.key = string(key[33:])
-
- if len(keys) != 0 && !slices.Contains(keys, b.key) {
- return nil, nil, ErrUnexpectedAttributeKey
- }
-
return &b, next, nil
}
}
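
For orientation, the parser above slices the user-attribute bucket name as one prefix byte, a 32-byte container ID, and the attribute key from byte 33 onward; the dropped whitelist only constrained that trailing key. A hedged sketch of the split, where the prefix value 12 assumes the iota order in prefix.go:

package main

import (
	"errors"
	"fmt"
)

// splitUserAttributeKey models the slicing in the parser above:
// byte 0 is the bucket prefix, bytes 1..32 hold the container ID,
// and everything from byte 33 on is the attribute key.
func splitUserAttributeKey(name []byte) (cnrID []byte, attr string, err error) {
	if len(name) < 33 {
		return nil, "", errors.New("invalid key length")
	}
	return name[1:33], string(name[33:]), nil
}

func main() {
	name := append([]byte{12}, make([]byte, 32)...) // prefix + zero CID
	name = append(name, []byte("FilePath")...)

	cnr, attr, err := splitUserAttributeKey(name)
	if err != nil {
		panic(err)
	}
	fmt.Printf("cid bytes: %d, attr key: %q\n", len(cnr), attr)
}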
diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go
index 4cc9e8765..ea095e207 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/parser.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go
@@ -5,30 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
)
-var MetabaseParserV3 = common.WithFallback(
- common.Any(
- buckets.GraveyardParser,
- buckets.GarbageParser,
- buckets.ContainerVolumeParser,
- buckets.LockedParser,
- buckets.ShardInfoParser,
- buckets.PrimaryParser,
- buckets.LockersParser,
- buckets.TombstoneParser,
- buckets.SmallParser,
- buckets.RootParser,
- buckets.UserAttributeParserV3,
- buckets.ParentParser,
- buckets.SplitParser,
- buckets.ContainerCountersParser,
- buckets.ECInfoParser,
- buckets.ExpirationEpochToObjectParser,
- buckets.ObjectToExpirationEpochParser,
- ),
- common.RawParser.ToFallbackParser(),
-)
-
-var MetabaseParserV2 = common.WithFallback(
+var MetabaseParser = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
@@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
buckets.SmallParser,
buckets.RootParser,
buckets.OwnerParser,
- buckets.UserAttributeParserV2,
+ buckets.UserAttributeParser,
buckets.PayloadHashParser,
buckets.ParentParser,
buckets.SplitParser,
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
index 477c4fc9d..2dda15b4f 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
@@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
func (r *ECInfoRecord) DetailedString() string {
return spew.Sdump(*r)
}
-
-func (r *ExpirationEpochToObjectRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ObjectToExpirationEpochRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
index e038911d7..880a7a8ff 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
@@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
return common.No
}
}
-
-func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
index 5d846cb75..1b070e2a0 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
@@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
}
return &r, nil, nil
}
-
-func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 72 {
- return nil, nil, ErrInvalidKeyLength
- }
-
- var (
- r ExpirationEpochToObjectRecord
- err error
- )
-
- r.epoch = binary.BigEndian.Uint64(key[:8])
- if err = r.cnt.Decode(key[8:40]); err != nil {
- return nil, nil, err
- }
- if err = r.obj.Decode(key[40:]); err != nil {
- return nil, nil, err
- }
-
- return &r, nil, nil
-}
-
-func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 32 {
- return nil, nil, ErrInvalidKeyLength
- }
- if len(value) != 8 {
- return nil, nil, ErrInvalidValueLength
- }
-
- var (
- r ObjectToExpirationEpochRecord
- err error
- )
-
- if err = r.obj.Decode(key); err != nil {
- return nil, nil, err
- }
- r.epoch = binary.LittleEndian.Uint64(value)
-
- return &r, nil, nil
-}
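
The removed parser also documents the key layout of the expiration-epoch index: an 8-byte big-endian epoch, a 32-byte CID, and a 32-byte OID, 72 bytes in total (big-endian so keys sort by epoch). A standalone sketch of packing and unpacking that layout:

package main

import (
	"encoding/binary"
	"fmt"
)

// packExpirationKey builds the 72-byte key the removed parser expects:
// 8-byte big-endian epoch, then 32 bytes of CID, then 32 bytes of OID.
func packExpirationKey(epoch uint64, cnr, obj [32]byte) []byte {
	key := make([]byte, 0, 72)
	key = binary.BigEndian.AppendUint64(key, epoch)
	key = append(key, cnr[:]...)
	key = append(key, obj[:]...)
	return key
}

func main() {
	var cnr, obj [32]byte
	cnr[0], obj[0] = 0xAA, 0xBB

	key := packExpirationKey(42, cnr, obj)
	fmt.Println("key length:", len(key)) // 72

	// Unpacking mirrors the removed ExpirationEpochToObjectRecordParser.
	fmt.Println("epoch:", binary.BigEndian.Uint64(key[:8]))
	fmt.Printf("cid[0]=%x oid[0]=%x\n", key[8], key[40])
}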
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
index f71244625..a6c70d537 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
@@ -2,7 +2,6 @@ package records
import (
"fmt"
- "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"github.com/gdamore/tcell/v2"
@@ -39,7 +38,7 @@ func (r *ContainerVolumeRecord) String() string {
func (r *LockedRecord) String() string {
return fmt.Sprintf(
- "Object OID %s %c Lockers [%d]OID {...}",
+ "Locker OID %s %c Locked [%d]OID {...}",
common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
tview.Borders.Vertical,
len(r.ids),
@@ -134,22 +133,3 @@ func (r *ECInfoRecord) String() string {
len(r.ids),
)
}
-
-func (r *ExpirationEpochToObjectRecord) String() string {
- return fmt.Sprintf(
- "exp. epoch %s %c CID %s OID %s",
- common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
- tview.Borders.Vertical,
- common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
- common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
- )
-}
-
-func (r *ObjectToExpirationEpochRecord) String() string {
- return fmt.Sprintf(
- "OID %s %c exp. epoch %s",
- common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
- tview.Borders.Vertical,
- common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
- )
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
index 0809cad1a..34c1c29fd 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
@@ -79,15 +79,4 @@ type (
id oid.ID
ids []oid.ID
}
-
- ExpirationEpochToObjectRecord struct {
- epoch uint64
- cnt cid.ID
- obj oid.ID
- }
-
- ObjectToExpirationEpochRecord struct {
- obj oid.ID
- epoch uint64
- }
)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
index d15d69146..f50ebe951 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/util.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
@@ -11,7 +11,7 @@ func DecodeOIDs(data []byte) ([]oid.ID, error) {
size := r.ReadVarUint()
oids := make([]oid.ID, size)
- for i := range size {
+ for i := uint64(0); i < size; i++ {
if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
return nil, err
}
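
The loop rewrite is purely a toolchain-compatibility change: for i := range size (range over an integer) requires Go 1.22 or newer and a matching go directive in go.mod, while the explicit counter compiles everywhere. Both forms visit i = 0..size-1:

package main

import "fmt"

func main() {
	var size uint64 = 3

	// Go 1.22+ only: for i := range size { ... }
	// Pre-1.22 equivalent, as used in DecodeOIDs after this change:
	for i := uint64(0); i < size; i++ {
		fmt.Println("decoding OID at index", i)
	}
}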
diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
index 3bfe2608b..7d70b27b2 100644
--- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go
+++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
@@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser,
r.addr.SetContainer(cnr)
r.addr.SetObject(obj)
- r.data = value
+ r.data = value[:]
return &r, nil, nil
}
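
Worth noting that value[:] re-slices rather than copies, so both sides of this hunk alias the buffer owned by bbolt, which is only valid while the transaction is open. A true copy would use bytes.Clone (Go 1.20+) or an append, as this sketch shows:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	value := []byte("payload")

	alias := value[:]            // same backing array, no copy
	copied := bytes.Clone(value) // independent copy

	value[0] = 'P'
	fmt.Printf("alias=%s copied=%s\n", alias, copied) // alias=Payload copied=payload
}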
diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go
index 11e6f3fcd..3f71c5366 100644
--- a/cmd/frostfs-lens/internal/schema/writecache/types.go
+++ b/cmd/frostfs-lens/internal/schema/writecache/types.go
@@ -16,8 +16,6 @@ type (
DefaultRecord struct {
addr oid.Address
- // data used for record dump.
- // nolint:unused
data []byte
}
)
diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go
index 2d3b20792..3f5088e7a 100644
--- a/cmd/frostfs-lens/internal/tui/buckets.go
+++ b/cmd/frostfs-lens/internal/tui/buckets.go
@@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path
parser := parentBucket.NextParser
- buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+ buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+ if err != nil {
+ return err
+ }
for item := range buffer {
if item.err != nil {
@@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
}
bucket := item.val
- var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil {
return err
@@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel()
// Check the current bucket's nested buckets if exist
- bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+ bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+ if err != nil {
+ return false, err
+ }
for item := range bucketsBuffer {
if item.err != nil {
@@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
b := item.val
- var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil {
return false, err
@@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
// Check the current bucket's nested records if exist
- recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+ recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+ if err != nil {
+ return false, err
+ }
for item := range recordsBuffer {
if item.err != nil {
@@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
r := item.val
- var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil {
return false, err
diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go
index 94fa87f98..d0cf611d4 100644
--- a/cmd/frostfs-lens/internal/tui/db.go
+++ b/cmd/frostfs-lens/internal/tui/db.go
@@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T,
-) <-chan Item[T] {
+) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize)
go func() {
@@ -77,13 +77,13 @@ func load[T any](
}
}()
- return buffer
+ return buffer, nil
}
func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Bucket] {
- buffer := load(
+) (<-chan Item[*Bucket], error) {
+ buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value == nil
@@ -98,14 +98,17 @@ func LoadBuckets(
}
},
)
+ if err != nil {
+ return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+ }
- return buffer
+ return buffer, nil
}
func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Record] {
- buffer := load(
+) (<-chan Item[*Record], error) {
+ buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value != nil
@@ -121,8 +124,11 @@ func LoadRecords(
}
},
)
+ if err != nil {
+ return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+ }
- return buffer
+ return buffer, nil
}
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
@@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- buffer := load(
+ buffer, err := load(
ctx, db, path, 1,
nil,
func(_, value []byte) []byte { return value },
)
+ if err != nil {
+ return false, err
+ }
x, ok := <-buffer
if !ok {
return false, nil
}
if x.err != nil {
return false, x.err
}
if x.val != nil {
return false, nil
}
return true, nil
}
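
All the call-site churn above follows from load regaining an error return. The shape of the helper is a producer goroutine feeding a buffered channel of value/error pairs that the caller ranges over until close; a simplified model with hypothetical names:

package main

import (
	"context"
	"fmt"
)

// Item mirrors the channel element used by load: a value paired with
// the error that interrupted iteration, if any.
type Item[T any] struct {
	val T
	err error
}

// loadInts models the load helper above: it starts a producer goroutine
// and returns the channel, plus a vestigial error to match the signature.
func loadInts(ctx context.Context, n, bufferSize int) (<-chan Item[int], error) {
	buffer := make(chan Item[int], bufferSize)
	go func() {
		defer close(buffer)
		for i := 0; i < n; i++ {
			select {
			case <-ctx.Done():
				return
			case buffer <- Item[int]{val: i}:
			}
		}
	}()
	return buffer, nil
}

func main() {
	buffer, err := loadInts(context.Background(), 3, 2)
	if err != nil {
		panic(err)
	}
	for item := range buffer {
		if item.err != nil {
			panic(item.err)
		}
		fmt.Println(item.val)
	}
}

The error return is vestigial, since load always returns nil, but it lets LoadBuckets and LoadRecords keep a uniform (channel, error) signature for their callers.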
diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go
index 471514e5d..4fdf97119 100644
--- a/cmd/frostfs-lens/internal/tui/input.go
+++ b/cmd/frostfs-lens/internal/tui/input.go
@@ -1,8 +1,6 @@
package tui
import (
- "slices"
-
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
// Used history data for search prompt, so just make that data recent.
if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
- f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
+ f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
f.history = append(f.history, s)
}
@@ -53,17 +51,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
f.historyPointer++
// Stop iterating over history.
if f.historyPointer == len(f.history) {
- f.SetText(f.currentContent)
+ f.InputField.SetText(f.currentContent)
return
}
- f.SetText(f.history[f.historyPointer])
+ f.InputField.SetText(f.history[f.historyPointer])
case tcell.KeyUp:
if len(f.history) == 0 {
return
}
// Start iterating over history.
if f.historyPointer == len(f.history) {
- f.currentContent = f.GetText()
+ f.currentContent = f.InputField.GetText()
}
// End of history.
if f.historyPointer == 0 {
@@ -71,7 +69,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
}
// Iterate to least recent prompts.
f.historyPointer--
- f.SetText(f.history[f.historyPointer])
+ f.InputField.SetText(f.history[f.historyPointer])
default:
f.InputField.InputHandler()(event, func(tview.Primitive) {})
}
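
The history edit swaps slices.Delete (Go 1.21+) for the classic append splice; for this in-place use the two are equivalent. A side-by-side comparison, assuming a Go 1.21+ toolchain just for the comparison itself:

package main

import (
	"fmt"
	"slices"
)

func main() {
	history := []string{"a", "b", "c", "d"}
	i := 1 // element to drop

	// Go 1.21+ form used before this change (on a clone, to compare):
	viaSlices := slices.Delete(slices.Clone(history), i, i+1)

	// append-based splice used after this change (mutates in place):
	viaAppend := append(history[:i], history[i+1:]...)

	fmt.Println(viaSlices) // [a c d]
	fmt.Println(viaAppend) // [a c d]
}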
diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go
index a4d392ab3..5f53ed287 100644
--- a/cmd/frostfs-lens/internal/tui/records.go
+++ b/cmd/frostfs-lens/internal/tui/records.go
@@ -8,7 +8,6 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@@ -63,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx)
- tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+ tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+ if err != nil {
+ return err
+ }
v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() {
@@ -71,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer {
if item.err != nil {
v.ui.stopOnError(item.err)
break
}
record := item.val
- var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil {
v.ui.stopOnError(err)
@@ -95,7 +96,9 @@ func (v *RecordsView) Mount(ctx context.Context) error {
}
func (v *RecordsView) Unmount() {
- assert.False(v.onUnmount == nil, "try to unmount not mounted component")
+ if v.onUnmount == nil {
+ panic("try to unmount not mounted component")
+ }
v.onUnmount()
v.onUnmount = nil
}
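
The assert.False call becomes an explicit nil check with the same failure mode. A compact model of the Mount/Unmount contract, where onUnmount doubles as both the cancel function and the mounted flag:

package main

import (
	"context"
	"fmt"
)

type view struct {
	onUnmount context.CancelFunc
}

func (v *view) Mount(ctx context.Context) error {
	if v.onUnmount != nil {
		return fmt.Errorf("already mounted")
	}
	_, v.onUnmount = context.WithCancel(ctx)
	return nil
}

// Unmount mirrors the guard above: unmounting a view that was never
// mounted is a programmer error, so it panics rather than returning.
func (v *view) Unmount() {
	if v.onUnmount == nil {
		panic("try to unmount not mounted component")
	}
	v.onUnmount()
	v.onUnmount = nil
}

func main() {
	v := &view{}
	if err := v.Mount(context.Background()); err != nil {
		panic(err)
	}
	v.Unmount()
	fmt.Println("mounted and unmounted cleanly")
}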
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
index cc6b7859e..bcc082821 100644
--- a/cmd/frostfs-lens/internal/tui/ui.go
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
return
}
- switch v := ui.mountedPage.(type) {
+ switch ui.mountedPage.(type) {
case *BucketsView:
ui.moveNextPage(NewBucketsView(ui, res))
case *RecordsView:
- bucket := v.bucket
+ bucket := ui.mountedPage.(*RecordsView).bucket
ui.moveNextPage(NewRecordsView(ui, bucket, res))
}
@@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
}
- ui.MouseHandler()
+ ui.Box.MouseHandler()
}
func (ui *UI) WithPrompt(prompt string) error {
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
index 2d1ab3e33..d4e13b2a9 100644
--- a/cmd/frostfs-lens/internal/tui/util.go
+++ b/cmd/frostfs-lens/internal/tui/util.go
@@ -3,25 +3,12 @@ package tui
import (
"errors"
"strings"
- "time"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
- "go.etcd.io/bbolt"
)
-func OpenDB(path string, writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(path, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- Timeout: 100 * time.Millisecond,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
-
func CIDParser(s string) (any, error) {
data, err := base58.Decode(s)
if err != nil {
diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go
index b7e4d7c96..6b7532b08 100644
--- a/cmd/frostfs-lens/internal/writecache/tui.go
+++ b/cmd/frostfs-lens/internal/writecache/tui.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -42,7 +43,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := tui.OpenDB(vPath, false)
+ db, err := openDB(false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -66,3 +67,13 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
+
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go
index 2d52e0c56..ec737f8a0 100644
--- a/cmd/frostfs-node/accounting.go
+++ b/cmd/frostfs-node/accounting.go
@@ -3,18 +3,19 @@ package main
import (
"context"
"net"
- "strings"
+ accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc"
accountingService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
accounting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting/morph"
- accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc"
"google.golang.org/grpc"
)
func initAccountingService(ctx context.Context, c *cfg) {
- c.initMorphComponents(ctx)
+ if c.cfgMorph.client == nil {
+ initMorphComponents(ctx, c)
+ }
balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, c.cfgAccounting.scriptHash, 0)
fatalOnErr(err)
@@ -31,27 +32,5 @@ func initAccountingService(ctx context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
accountingGRPC.RegisterAccountingServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(accountingGRPC.AccountingService_ServiceDesc), server)
})
}
-
-// frostFSServiceDesc creates a service descriptor with the new namespace for dual service support.
-func frostFSServiceDesc(sd grpc.ServiceDesc) *grpc.ServiceDesc {
- sdLegacy := new(grpc.ServiceDesc)
- *sdLegacy = sd
-
- const (
- legacyNamespace = "neo.fs.v2"
- apemanagerLegacyNamespace = "frostfs.v2"
- newNamespace = "frost.fs"
- )
-
- if strings.HasPrefix(sd.ServiceName, legacyNamespace) {
- sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, legacyNamespace, newNamespace)
- } else if strings.HasPrefix(sd.ServiceName, apemanagerLegacyNamespace) {
- sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, apemanagerLegacyNamespace, newNamespace)
- }
- return sdLegacy
-}
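
The deleted frostFSServiceDesc captures a useful gRPC trick: one server implementation can be registered under a second, renamed service descriptor so that old and new fully-qualified service names resolve to the same handlers. A minimal sketch of that aliasing, with hypothetical descriptor contents:

package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc"
)

// aliasServiceDesc copies a service descriptor under a renamed namespace,
// the technique the removed frostFSServiceDesc used for dual registration.
func aliasServiceDesc(sd grpc.ServiceDesc, oldNS, newNS string) *grpc.ServiceDesc {
	alias := sd // shallow copy: method handlers and streams stay shared
	alias.ServiceName = strings.Replace(sd.ServiceName, oldNS, newNS, 1)
	return &alias
}

func main() {
	desc := grpc.ServiceDesc{ServiceName: "neo.fs.v2.accounting.AccountingService"}
	fmt.Println(aliasServiceDesc(desc, "neo.fs.v2", "frost.fs").ServiceName)
	// A server would then call s.RegisterService(aliasServiceDesc(...), impl)
	// next to the generated Register...ServiceServer call.
}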
diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go
index 513314712..79c45c254 100644
--- a/cmd/frostfs-node/apemanager.go
+++ b/cmd/frostfs-node/apemanager.go
@@ -3,23 +3,22 @@ package main
import (
"net"
+ apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc"
ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage"
morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
apemanager_transport "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/apemanager/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager"
- apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
"google.golang.org/grpc"
)
func initAPEManagerService(c *cfg) {
contractStorage := ape_contract.NewProxyVerificationContractStorage(
morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
- c.key,
+ c.shared.key,
c.cfgMorph.proxyScriptHash,
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
- c.cfgMorph.client,
apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)
@@ -27,8 +26,5 @@ func initAPEManagerService(c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
apemanager_grpc.RegisterAPEManagerServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(apemanager_grpc.APEManagerService_ServiceDesc), server)
})
}
diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go
index ce8ae9662..64c3beba7 100644
--- a/cmd/frostfs-node/attributes.go
+++ b/cmd/frostfs-node/attributes.go
@@ -6,5 +6,9 @@ import (
)
func parseAttributes(c *cfg) {
+ if nodeconfig.Relay(c.appCfg) {
+ return
+ }
+
fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg)))
}
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index e5df0a22d..57f65d873 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -1,30 +1,22 @@
package main
import (
- "bytes"
- "cmp"
- "context"
- "slices"
"sync"
- "sync/atomic"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/v2/expirable"
- "github.com/hashicorp/golang-lru/v2/simplelru"
- "go.uber.org/zap"
)
-type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
+type netValueReader[K any, V any] func(K) (V, error)
type valueWithError[V any] struct {
v V
@@ -57,7 +49,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// updates the value from the network on cache miss or by TTL.
//
// returned value should not be modified.
-func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
+func (c *ttlNetCache[K, V]) get(key K) (V, error) {
hit := false
startedAt := time.Now()
defer func() {
@@ -79,7 +71,7 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
return val.v, val.e
}
- v, err := c.netRdr(ctx, key)
+ v, err := c.netRdr(key)
c.cache.Add(key, &valueWithError[V]{
v: v,
@@ -117,6 +109,55 @@ func (c *ttlNetCache[K, V]) remove(key K) {
hit = c.cache.Remove(key)
}
+// lruNetCache is an entity that provides an LRU cache interface.
+type lruNetCache struct {
+ cache *lru.Cache[uint64, *netmapSDK.NetMap]
+
+ netRdr netValueReader[uint64, *netmapSDK.NetMap]
+
+ metrics cacheMetrics
+}
+
+// newNetworkLRUCache returns a wrapper over netValueReader with an LRU cache.
+func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache {
+ cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
+ fatalOnErr(err)
+
+ return &lruNetCache{
+ cache: cache,
+ netRdr: netRdr,
+ metrics: metrics,
+ }
+}
+
+// reads value by the key.
+//
+// updates the value from the network on cache miss.
+//
+// returned value should not be modified.
+func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
+ hit := false
+ startedAt := time.Now()
+ defer func() {
+ c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
+ }()
+
+ val, ok := c.cache.Get(key)
+ if ok {
+ hit = true
+ return val, nil
+ }
+
+ val, err := c.netRdr(key)
+ if err != nil {
+ return nil, err
+ }
+
+ c.cache.Add(key, val)
+
+ return val, nil
+}
+
// wrapper over TTL cache of values read from the network
// that implements container storage.
type ttlContainerStorage struct {
@@ -124,12 +165,14 @@ type ttlContainerStorage struct {
delInfoCache *ttlNetCache[cid.ID, *container.DelInfo]
}
-func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
- lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) {
- return v.Get(ctx, id)
+func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
+ const containerCacheSize = 100
+
+ lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
+ return v.Get(id)
}, metrics.NewCacheMetrics("container"))
- lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
- return v.DeletionInfo(ctx, id)
+ lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
+ return v.DeletionInfo(id)
}, metrics.NewCacheMetrics("container_deletion_info"))
return ttlContainerStorage{
@@ -147,245 +190,68 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache.
-func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) {
- return s.containerCache.get(ctx, cnr)
+func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
+ return s.containerCache.get(cnr)
}
-func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) {
- return s.delInfoCache.get(ctx, cnr)
+func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
+ return s.delInfoCache.get(cnr)
+}
+
+type ttlEACLStorage struct {
+ *ttlNetCache[cid.ID, *container.EACL]
+}
+
+func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) ttlEACLStorage {
+ const eaclCacheSize = 100
+
+ lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(id cid.ID) (*container.EACL, error) {
+ return v.GetEACL(id)
+ }, metrics.NewCacheMetrics("eacl"))
+
+ return ttlEACLStorage{lruCnrCache}
+}
+
+// GetEACL returns eACL value from the cache. If value is missing in the cache
+// or expired, then it returns value from side chain and updates cache.
+func (s ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) {
+ return s.get(cnr)
+}
+
+// InvalidateEACL removes cached eACL value.
+func (s ttlEACLStorage) InvalidateEACL(cnr cid.ID) {
+ s.remove(cnr)
}
type lruNetmapSource struct {
netState netmap.State
- client rawSource
- cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
- mtx sync.RWMutex
- metrics cacheMetrics
- log *logger.Logger
- candidates atomic.Pointer[[]netmapSDK.NodeInfo]
+ cache *lruNetCache
}
-type rawSource interface {
- GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
- GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
-}
-
-func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
- netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
-) netmap.Source {
+func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10
- cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
- fatalOnErr(err)
+ lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
+ return v.GetNetMapByEpoch(key)
+ }, metrics.NewCacheMetrics("netmap"))
- src := &lruNetmapSource{
- netState: netState,
- client: client,
- cache: cache,
- log: log,
- metrics: metrics.NewCacheMetrics("netmap"),
- }
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- src.updateCandidates(ctx, d)
- }()
-
- return src
-}
-
-// updateCandidates routine to merge netmap in cache with candidates list.
-func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
- timer := time.NewTimer(d)
- defer timer.Stop()
-
- for {
- select {
- case <-ctx.Done():
- return
- case <-timer.C:
- newCandidates, err := s.client.GetCandidates(ctx)
- if err != nil {
- s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
- timer.Reset(d)
- break
- }
- if len(newCandidates) == 0 {
- s.candidates.Store(&newCandidates)
- timer.Reset(d)
- break
- }
- slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
- return cmp.Compare(n1.Hash(), n2.Hash())
- })
-
- // Check once state changed
- v := s.candidates.Load()
- if v == nil {
- s.candidates.Store(&newCandidates)
- s.mergeCacheWithCandidates(newCandidates)
- timer.Reset(d)
- break
- }
- ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
- if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
- uint32(n1.Status()) != uint32(n2.Status()) ||
- slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
- return 1
- }
- ne1 := slices.Collect(n1.NetworkEndpoints())
- ne2 := slices.Collect(n2.NetworkEndpoints())
- return slices.Compare(ne1, ne2)
- })
- if ret != 0 {
- s.candidates.Store(&newCandidates)
- s.mergeCacheWithCandidates(newCandidates)
- }
- timer.Reset(d)
- }
+ return &lruNetmapSource{
+ netState: s,
+ cache: lruNetmapCache,
}
}
-func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
- s.mtx.Lock()
- tmp := s.cache.Values()
- s.mtx.Unlock()
- for _, pointer := range tmp {
- nm := pointer.Load()
- updates := getNetMapNodesToUpdate(nm, candidates)
- if len(updates) > 0 {
- nm = nm.Clone()
- mergeNetmapWithCandidates(updates, nm)
- pointer.Store(nm)
- }
- }
+func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
+ return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
}
-// reads value by the key.
-//
-// updates the value from the network on cache miss.
-//
-// returned value should not be modified.
-func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
- }()
-
- s.mtx.RLock()
- val, ok := s.cache.Get(key)
- s.mtx.RUnlock()
- if ok {
- hit = true
- return val.Load(), nil
- }
-
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- val, ok = s.cache.Get(key)
- if ok {
- hit = true
- return val.Load(), nil
- }
-
- nm, err := s.client.GetNetMapByEpoch(ctx, key)
- if err != nil {
- return nil, err
- }
- v := s.candidates.Load()
- if v != nil {
- updates := getNetMapNodesToUpdate(nm, *v)
- if len(updates) > 0 {
- mergeNetmapWithCandidates(updates, nm)
- }
- }
-
- p := atomic.Pointer[netmapSDK.NetMap]{}
- p.Store(nm)
- s.cache.Add(key, &p)
-
- return nm, nil
+func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+ return s.getNetMapByEpoch(epoch)
}
-// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates.
-func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
- for _, v := range updates {
- if v.status != netmapSDK.UnspecifiedState {
- nm.Nodes()[v.netmapIndex].SetStatus(v.status)
- }
- if v.externalAddresses != nil {
- nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
- }
- if v.endpoints != nil {
- nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
- }
- }
-}
-
-type nodeToUpdate struct {
- netmapIndex int
- status netmapSDK.NodeState
- externalAddresses []string
- endpoints []string
-}
-
-// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates.
-func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
- var res []nodeToUpdate
- for i := range nm.Nodes() {
- for _, cnd := range candidates {
- if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
- var tmp nodeToUpdate
- var update bool
-
- if cnd.Status() != nm.Nodes()[i].Status() &&
- (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
- update = true
- tmp.status = cnd.Status()
- }
-
- externalAddresses := cnd.ExternalAddresses()
- if externalAddresses != nil &&
- slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
- update = true
- tmp.externalAddresses = externalAddresses
- }
-
- nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
- nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints())
- candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
- candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints())
- if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
- update = true
- tmp.endpoints = candidateEndpoints
- }
-
- if update {
- tmp.netmapIndex = i
- res = append(res, tmp)
- }
-
- break
- }
- }
- }
- return res
-}
-
-func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
- return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff)
-}
-
-func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- return s.getNetMapByEpoch(ctx, epoch)
-}
-
-func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- val, err := s.get(ctx, epoch)
+func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+ val, err := s.cache.get(epoch)
if err != nil {
return nil, err
}
@@ -393,7 +259,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
return val, nil
}
-func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) {
+func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil
}
@@ -401,10 +267,7 @@ type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte]
}
-func newCachedIRFetcher(f interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
-},
-) cachedIRFetcher {
+func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
const (
irFetcherCacheSize = 1 // we intend to store only one value
@@ -418,8 +281,8 @@ func newCachedIRFetcher(f interface {
)
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
- func(ctx context.Context, _ struct{}) ([][]byte, error) {
- return f.InnerRingKeys(ctx)
+ func(_ struct{}) ([][]byte, error) {
+ return f.InnerRingKeys()
}, metrics.NewCacheMetrics("ir_keys"),
)
@@ -429,8 +292,8 @@ func newCachedIRFetcher(f interface {
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates
// the cache.
-func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) {
- val, err := f.get(ctx, struct{}{})
+func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
+ val, err := f.get(struct{}{})
if err != nil {
return nil, err
}
@@ -453,7 +316,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
}
}
-func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
+func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30
hit := false
@@ -475,7 +338,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
c.mtx.Lock()
size = c.lastSize
if !c.lastUpdated.After(prevUpdated) {
- size = c.src.MaxObjectSize(ctx)
+ size = c.src.MaxObjectSize()
c.lastSize = size
c.lastUpdated = time.Now()
}
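
To make the surrounding cache rewrites easier to follow: ttlNetCache wraps a network read behind hashicorp's expirable LRU, serving the cached value until the TTL lapses and only then re-reading from the network. A minimal model of that read path, with a hypothetical reader function and key:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

func main() {
	calls := 0
	// netRdr stands in for a side-chain read.
	netRdr := func(key string) (string, error) {
		calls++
		return "value-for-" + key, nil
	}

	cache := expirable.NewLRU[string, string](10, nil, 100*time.Millisecond)

	get := func(key string) (string, error) {
		if v, ok := cache.Get(key); ok {
			return v, nil // cache hit, no network round trip
		}
		v, err := netRdr(key)
		if err != nil {
			return "", err
		}
		cache.Add(key, v)
		return v, nil
	}

	v1, _ := get("cnr")
	v2, _ := get("cnr") // served from cache within the TTL
	fmt.Println(v1, v2, "network calls:", calls) // network calls: 1
}

The real ttlNetCache additionally caches errors alongside values (valueWithError), so a failing read is not retried on every call within the TTL window.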
diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go
index 24286826f..f8c324a2f 100644
--- a/cmd/frostfs-node/cache_test.go
+++ b/cmd/frostfs-node/cache_test.go
@@ -1,13 +1,10 @@
package main
import (
- "context"
"errors"
- "sync"
"testing"
"time"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)
@@ -20,7 +17,7 @@ func TestTTLNetCache(t *testing.T) {
t.Run("Test Add and Get", func(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
- val, err := cache.get(context.Background(), key)
+ val, err := cache.get(key)
require.NoError(t, err)
require.Equal(t, ti, val)
})
@@ -29,7 +26,7 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
time.Sleep(2 * ttlDuration)
- val, err := cache.get(context.Background(), key)
+ val, err := cache.get(key)
require.NoError(t, err)
require.NotEqual(t, val, ti)
})
@@ -38,20 +35,20 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
cache.remove(key)
- val, err := cache.get(context.Background(), key)
+ val, err := cache.get(key)
require.NoError(t, err)
require.NotEqual(t, val, ti)
})
t.Run("Test Cache Error", func(t *testing.T) {
cache.set("error", time.Now(), errors.New("mock error"))
- _, err := cache.get(context.Background(), "error")
+ _, err := cache.get("error")
require.Error(t, err)
require.Equal(t, "mock error", err.Error())
})
}
-func testNetValueReader(_ context.Context, key string) (time.Time, error) {
+func testNetValueReader(key string) (time.Time, error) {
if key == "error" {
return time.Now(), errors.New("mock error")
}
@@ -61,75 +58,3 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) {
type noopCacheMetricts struct{}
func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
-
-type rawSrc struct{}
-
-func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
- node0 := netmapSDK.NodeInfo{}
- node0.SetPublicKey([]byte{byte(1)})
- node0.SetStatus(netmapSDK.Online)
- node0.SetExternalAddresses("1", "0")
- node0.SetNetworkEndpoints("1", "0")
-
- node1 := netmapSDK.NodeInfo{}
- node1.SetPublicKey([]byte{byte(1)})
- node1.SetStatus(netmapSDK.Online)
- node1.SetExternalAddresses("1", "0")
- node1.SetNetworkEndpoints("1", "0")
-
- return []netmapSDK.NodeInfo{node0, node1}, nil
-}
-
-func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- nm := netmapSDK.NetMap{}
- nm.SetEpoch(1)
-
- node0 := netmapSDK.NodeInfo{}
- node0.SetPublicKey([]byte{byte(1)})
- node0.SetStatus(netmapSDK.Maintenance)
- node0.SetExternalAddresses("0")
- node0.SetNetworkEndpoints("0")
-
- node1 := netmapSDK.NodeInfo{}
- node1.SetPublicKey([]byte{byte(1)})
- node1.SetStatus(netmapSDK.Maintenance)
- node1.SetExternalAddresses("0")
- node1.SetNetworkEndpoints("0")
-
- nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
-
- return &nm, nil
-}
-
-type st struct{}
-
-func (s *st) CurrentEpoch() uint64 {
- return 1
-}
-
-func TestNetmapStorage(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- wg := sync.WaitGroup{}
- cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
-
- nm, err := cache.GetNetMapByEpoch(ctx, 1)
- require.NoError(t, err)
- require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
- require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
- require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
-
- require.Eventually(t, func() bool {
- nm, err := cache.GetNetMapByEpoch(ctx, 1)
- require.NoError(t, err)
- for _, node := range nm.Nodes() {
- if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
- node.NumberOfNetworkEndpoints() == 2) {
- return false
- }
- }
- return true
- }, time.Second*5, time.Millisecond*10)
-
- cancel()
- wg.Wait()
-}
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 96274e625..c0019d36a 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -15,6 +15,7 @@ import (
"syscall"
"time"
+ netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/audit"
@@ -25,23 +26,18 @@ import (
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
- treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -60,7 +56,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone"
tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
@@ -72,8 +67,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -109,18 +102,11 @@ type applicationConfiguration struct {
LoggerCfg struct {
level string
destination string
- timestamp bool
- options []zap.Option
- tags [][]string
- }
-
- ObjectCfg struct {
- tombstoneLifetime uint64
- priorityMetrics []placement.Metric
}
EngineCfg struct {
errorThreshold uint32
+ shardPoolSize uint32
shards []shardCfg
lowMem bool
}
@@ -130,13 +116,15 @@ type applicationConfiguration struct {
}
type shardCfg struct {
- compression compression.Config
+ compress bool
+ estimateCompressibility bool
+ estimateCompressibilityThreshold float64
smallSizeObjectLimit uint64
+ uncompressableContentType []string
refillMetabase bool
refillMetabaseWorkersCount int
mode shardmode.Mode
- limiter qos.Limiter
metaCfg struct {
path string
@@ -232,72 +220,49 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c)
- a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
- var opts []zap.Option
- if loggerconfig.ToLokiConfig(c).Enabled {
- opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
- lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
- return lokiCore
- })}
- }
- a.LoggerCfg.options = opts
- a.LoggerCfg.tags = loggerconfig.Tags(c)
-
- // Object
-
- a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
- locodeDBPath := nodeconfig.LocodeDBPath(c)
- parser, err := placement.NewMetricsParser(locodeDBPath)
- if err != nil {
- return fmt.Errorf("metrics parser creation: %w", err)
- }
- m, err := parser.ParseMetrics(objectconfig.Get(c).Priority())
- if err != nil {
- return fmt.Errorf("parse metrics: %w", err)
- }
- a.ObjectCfg.priorityMetrics = m
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
+ a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
}
-func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
- var target shardCfg
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
+ var newConfig shardCfg
- target.refillMetabase = source.RefillMetabase()
- target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
- target.mode = source.Mode()
- target.compression = source.Compression()
- target.smallSizeObjectLimit = source.SmallSizeLimit()
+ newConfig.refillMetabase = oldConfig.RefillMetabase()
+ newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
+ newConfig.mode = oldConfig.Mode()
+ newConfig.compress = oldConfig.Compress()
+ newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
+ newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
+ newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
+ newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
- a.setShardWriteCacheConfig(&target, source)
+ a.setShardWriteCacheConfig(&newConfig, oldConfig)
- a.setShardPiloramaConfig(c, &target, source)
+ a.setShardPiloramaConfig(c, &newConfig, oldConfig)
- if err := a.setShardStorageConfig(&target, source); err != nil {
+ if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
return err
}
- a.setMetabaseConfig(&target, source)
+ a.setMetabaseConfig(&newConfig, oldConfig)
- a.setGCConfig(&target, source)
- if err := a.setLimiter(&target, source); err != nil {
- return err
- }
+ a.setGCConfig(&newConfig, oldConfig)
- a.EngineCfg.shards = append(a.EngineCfg.shards, target)
+ a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
return nil
}
-func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
- writeCacheCfg := source.WriteCache()
+func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+ writeCacheCfg := oldConfig.WriteCache()
if writeCacheCfg.Enabled() {
- wc := &target.writecacheCfg
+ wc := &newConfig.writecacheCfg
wc.enabled = true
wc.path = writeCacheCfg.Path()
@@ -310,10 +275,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, so
}
}
-func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
if config.BoolSafe(c.Sub("tree"), "enabled") {
- piloramaCfg := source.Pilorama()
- pr := &target.piloramaCfg
+ piloramaCfg := oldConfig.Pilorama()
+ pr := &newConfig.piloramaCfg
pr.enabled = true
pr.path = piloramaCfg.Path()
@@ -324,8 +289,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
}
}
-func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
- blobStorCfg := source.BlobStor()
+func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+ blobStorCfg := oldConfig.BlobStor()
storagesCfg := blobStorCfg.Storages()
ss := make([]subStorageCfg, 0, len(storagesCfg))
@@ -359,13 +324,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
ss = append(ss, sCfg)
}
- target.subStorages = ss
+ newConfig.subStorages = ss
return nil
}
-func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
- metabaseCfg := source.Metabase()
- m := &target.metaCfg
+func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+ metabaseCfg := oldConfig.Metabase()
+ m := &newConfig.metaCfg
m.path = metabaseCfg.Path()
m.perm = metabaseCfg.BoltDB().Perm()
@@ -373,22 +338,12 @@ func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *s
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
}
-func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
- gcCfg := source.GC()
- target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
- target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
- target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
- target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
-}
-
-func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
- limitsConfig := source.Limits().ToConfig()
- limiter, err := qos.NewLimiter(limitsConfig)
- if err != nil {
- return err
- }
- target.limiter = limiter
- return nil
+func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+ gcCfg := oldConfig.GC()
+ newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+ newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+ newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+ newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
}
// internals contains application-specific internals that are created
@@ -419,16 +374,16 @@ type internals struct {
}
// starts node's maintenance.
-func (c *cfg) startMaintenance(ctx context.Context) {
+func (c *cfg) startMaintenance() {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
- c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance)
+ c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
-func (c *internals) stopMaintenance(ctx context.Context) {
+func (c *internals) stopMaintenance() {
if c.isMaintenance.CompareAndSwap(true, false) {
- c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance)
+ c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
}
@@ -471,13 +426,12 @@ type shared struct {
metricsCollector *metrics.NodeMetrics
metricsSvc *objectService.MetricCollector
-
- dialerSource *internalNet.DialerSource
}
// dynamicConfiguration stores parameters of the
// components that support runtime reconfiguration.
type dynamicConfiguration struct {
+ logger *logger.Prm
pprof *httpComponent
metrics *httpComponent
}
@@ -514,7 +468,6 @@ type cfg struct {
cfgNetmap cfgNetmap
cfgControlService cfgControlService
cfgObject cfgObject
- cfgQoSService cfgQoSService
}
// ReadCurrentNetMap reads network map which has been cached at the
@@ -549,8 +502,6 @@ type cfgGRPC struct {
maxChunkSize uint64
maxAddrAmount uint64
reconnectTimeout time.Duration
-
- limiter atomic.Pointer[limiting.SemaphoreLimiter]
}
func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@@ -610,16 +561,13 @@ func (c *cfgGRPC) dropConnection(endpoint string) {
}
type cfgMorph struct {
- initialized bool
- guard sync.Mutex
-
client *client.Client
+ notaryEnabled bool
+
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
- containerCacheSize uint32
-
proxyScriptHash neogoutil.Uint160
}
@@ -630,10 +578,9 @@ type cfgAccounting struct {
type cfgContainer struct {
scriptHash neogoutil.Uint160
- parsers map[event.Type]event.NotificationParser
- subscribers map[event.Type][]event.Handler
- workerPool util.WorkerPool // pool for asynchronous handlers
- containerBatchSize uint32
+ parsers map[event.Type]event.NotificationParser
+ subscribers map[event.Type][]event.Handler
+ workerPool util.WorkerPool // pool for asynchronous handlers
}
type cfgFrostfsID struct {
@@ -651,7 +598,9 @@ type cfgNetmap struct {
state *networkState
+ needBootstrap bool
reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime
+ startEpoch uint64 // epoch number when application is started
}
type cfgNodeInfo struct {
@@ -664,13 +613,15 @@ type cfgObject struct {
cnrSource container.Source
+ eaclSource container.EACLSource
+
cfgAccessPolicyEngine cfgAccessPolicyEngine
pool cfgObjectRoutines
cfgLocalStorage cfgLocalStorage
- tombstoneLifetime *atomic.Uint64
+ tombstoneLifetime uint64
skipSessionTokenIssuerVerification bool
}
@@ -686,6 +637,10 @@ type cfgAccessPolicyEngine struct {
}
type cfgObjectRoutines struct {
+ putRemote *ants.Pool
+
+ putLocal *ants.Pool
+
replication *ants.Pool
}
@@ -709,9 +664,11 @@ func initCfg(appCfg *config.Config) *cfg {
key := nodeconfig.Key(appCfg)
+ relayOnly := nodeconfig.Relay(appCfg)
+
netState := newNetworkState()
- c.shared = initShared(appCfg, key, netState)
+ c.shared = initShared(appCfg, key, netState, relayOnly)
netState.metrics = c.metricsCollector
@@ -720,7 +677,12 @@ func initCfg(appCfg *config.Config) *cfg {
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
- logger.UpdateLevelForTags(logPrm)
+ if loggerconfig.ToLokiConfig(appCfg).Enabled {
+ log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+ lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
+ return lokiCore
+ }))
+ }
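// Editorial sketch: zap.WrapCore swaps the logger's core wholesale, so any
// zapcore.Core decorator can be attached the same way as the Loki core
// above. zapcore.NewTee is the stock way to fan entries out to a second,
// hypothetical sink (auditCore is illustrative, not part of this code):
//
//	log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
//		return zapcore.NewTee(core, auditCore) // every entry goes to both cores
//	}))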
c.internals = initInternals(appCfg, log)
@@ -731,7 +693,7 @@ func initCfg(appCfg *config.Config) *cfg {
c.cfgFrostfsID = initFrostfsID(appCfg)
- c.cfgNetmap = initNetmap(appCfg, netState)
+ c.cfgNetmap = initNetmap(appCfg, netState, relayOnly)
c.cfgGRPC = initCfgGRPC()
@@ -777,24 +739,22 @@ func initSdNotify(appCfg *config.Config) bool {
return false
}
-func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared {
- netAddr := nodeconfig.BootstrapAddresses(appCfg)
+func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared {
+ var netAddr network.AddressGroup
+
+ if !relayOnly {
+ netAddr = nodeconfig.BootstrapAddresses(appCfg)
+ }
persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
fatalOnErr(err)
- nodeMetrics := metrics.NewNodeMetrics()
-
- ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg, nodeMetrics.MultinetMetrics()))
- fatalOnErr(err)
-
cacheOpts := cache.ClientCacheOpts{
DialTimeout: apiclientconfig.DialTimeout(appCfg),
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
Key: &key.PrivateKey,
AllowExternal: apiclientconfig.AllowExternal(appCfg),
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
- DialerSource: ds,
}
return shared{
@@ -806,38 +766,22 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
bgClientCache: cache.NewSDKClientCache(cacheOpts),
putClientCache: cache.NewSDKClientCache(cacheOpts),
persistate: persistate,
- metricsCollector: nodeMetrics,
- dialerSource: ds,
+ metricsCollector: metrics.NewNodeMetrics(),
}
}
-func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) internalNet.Config {
- result := internalNet.Config{
- Enabled: multinet.Enabled(appCfg),
- Balancer: multinet.Balancer(appCfg),
- Restrict: multinet.Restrict(appCfg),
- FallbackDelay: multinet.FallbackDelay(appCfg),
- Metrics: m,
- }
- sn := multinet.Subnets(appCfg)
- for _, s := range sn {
- result.Subnets = append(result.Subnets, internalNet.Subnet{
- Prefix: s.Mask,
- SourceIPs: s.SourceIPs,
- })
- }
- return result
-}
-
-func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap {
+func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
fatalOnErr(err)
+ var reBootstrapTurnedOff atomic.Bool
+ reBootstrapTurnedOff.Store(relayOnly)
return cfgNetmap{
scriptHash: contractsconfig.Netmap(appCfg),
state: netState,
workerPool: netmapWorkerPool,
- reBoostrapTurnedOff: &atomic.Bool{},
+ needBootstrap: !relayOnly,
+ reBoostrapTurnedOff: &reBootstrapTurnedOff,
}
}
@@ -857,22 +801,20 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
}
}
-func initCfgGRPC() (cfg cfgGRPC) {
- maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
- maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
+func initCfgGRPC() cfgGRPC {
+ maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
+ maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
- cfg.maxChunkSize = maxChunkSize
- cfg.maxAddrAmount = maxAddrAmount
-
- return
+ return cfgGRPC{
+ maxChunkSize: maxChunkSize,
+ maxAddrAmount: maxAddrAmount,
+ }
}
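// Worked example of the split above, assuming maxMsgSize is the usual
// 4 MiB gRPC message cap and addressSize is 72 bytes (both values are
// assumptions for illustration; the real constants live elsewhere in
// this package):
//
//	maxChunkSize  = 4194304 * 3 / 4 = 3145728 bytes left for payload
//	maxAddrAmount = 3145728 / 72    = 43690 addresses per message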
func initCfgObject(appCfg *config.Config) cfgObject {
- var tsLifetime atomic.Uint64
- tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg))
return cfgObject{
pool: initObjectPool(appCfg),
- tombstoneLifetime: &tsLifetime,
+ tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg),
skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(),
}
}
@@ -881,8 +823,9 @@ func (c *cfg) engineOpts() []engine.Option {
var opts []engine.Option
opts = append(opts,
+ engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
- engine.WithLogger(c.log.WithTag(logger.TagEngine)),
+ engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
)
@@ -919,8 +862,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxCacheSize(wcRead.sizeLimit),
writecache.WithMaxCacheCount(wcRead.countLimit),
writecache.WithNoSync(wcRead.noSync),
- writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)),
- writecache.WithQoSLimiter(shCfg.limiter),
+ writecache.WithLogger(c.log),
)
}
return writeCacheOpts
@@ -959,8 +901,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval),
blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
- blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)),
- blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)),
+ blobovniczatree.WithLogger(c.log),
blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit),
}
@@ -983,7 +924,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
fstree.WithPerm(sRead.perm),
fstree.WithDepth(sRead.depth),
fstree.WithNoSync(sRead.noSync),
- fstree.WithLogger(c.log.WithTag(logger.TagFSTree)),
+ fstree.WithLogger(c.log),
}
if c.metricsCollector != nil {
fstreeOpts = append(fstreeOpts,
@@ -1013,9 +954,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
ss := c.getSubstorageOpts(ctx, shCfg)
blobstoreOpts := []blobstor.Option{
- blobstor.WithCompression(shCfg.compression),
+ blobstor.WithCompressObjects(shCfg.compress),
+ blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
+ blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility),
+ blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold),
blobstor.WithStorages(ss),
- blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)),
+ blobstor.WithLogger(c.log),
}
if c.metricsCollector != nil {
blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore())))
@@ -1034,13 +978,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
}
if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
- shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
}
var sh shardOptsWithID
sh.configID = shCfg.id()
sh.shOpts = []shard.Option{
- shard.WithLogger(c.log.WithTag(logger.TagShard)),
+ shard.WithLogger(c.log),
shard.WithRefillMetabase(shCfg.refillMetabase),
shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
shard.WithMode(shCfg.mode),
@@ -1059,33 +1002,29 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return pool
}),
- shard.WithLimiter(shCfg.limiter),
}
return sh
}
-func (c *cfg) loggerPrm() (logger.Prm, error) {
- var prm logger.Prm
- // (re)init read configuration
- err := prm.SetLevelString(c.LoggerCfg.level)
- if err != nil {
- // not expected since validation should be performed before
- return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
- }
- err = prm.SetDestination(c.LoggerCfg.destination)
- if err != nil {
- // not expected since validation should be performed before
- return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
- }
- prm.PrependTimestamp = c.LoggerCfg.timestamp
- prm.Options = c.LoggerCfg.options
- err = prm.SetTags(c.LoggerCfg.tags)
- if err != nil {
- // not expected since validation should be performed before
- return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination)
+func (c *cfg) loggerPrm() (*logger.Prm, error) {
+ // check if it has been initialized before
+ if c.dynamicConfiguration.logger == nil {
+ c.dynamicConfiguration.logger = new(logger.Prm)
}
- return prm, nil
+ // (re)init read configuration
+ err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+ if err != nil {
+ // not expected since validation should be performed before
+ panic("incorrect log level format: " + c.LoggerCfg.level)
+ }
+ err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
+ if err != nil {
+ // not expected since validation should be performed before
+ panic("incorrect log destination format: " + c.LoggerCfg.destination)
+ }
+
+ return c.dynamicConfiguration.logger, nil
}
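// Sketch of how the cached Prm feeds SIGHUP handling, assuming
// logger.Prm.Reload re-applies level and destination to the live logger
// (that is how getComponents wires it further below):
//
//	logPrm, _ := c.loggerPrm() // same *logger.Prm instance on every call
//	_ = logPrm.Reload()        // picks up changed c.LoggerCfg values in place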
func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1095,7 +1034,7 @@ func (c *cfg) LocalAddress() network.AddressGroup {
func initLocalStorage(ctx context.Context, c *cfg) {
ls := engine.New(c.engineOpts()...)
- addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
+ addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber())
})
@@ -1105,14 +1044,12 @@ func initLocalStorage(ctx context.Context, c *cfg) {
var shardsAttached int
for _, optsWithMeta := range c.shardOpts(ctx) {
- id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts,
- shard.WithTombstoneSource(c.createTombstoneSource()),
- shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
+ id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
- c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
+ c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@@ -1122,26 +1059,27 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
- c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
+ c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
- c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
- zap.Error(err),
+ c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
+ zap.String("error", err.Error()),
)
} else {
- c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
+ c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
-func initAccessPolicyEngine(ctx context.Context, c *cfg) {
+func initAccessPolicyEngine(_ context.Context, c *cfg) {
var localOverrideDB chainbase.LocalOverrideDatabase
if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
- c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
+ c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
+ chainbase.WithLogger(c.log),
chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()),
chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()),
chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()),
@@ -1154,7 +1092,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
- if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
+ if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
}
@@ -1163,7 +1101,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.onShutdown(func() {
if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
- c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure,
+ c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
zap.Error(err),
)
}
@@ -1173,22 +1111,38 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
var err error
+ optNonBlocking := ants.WithNonblocking(true)
+
+ putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
+ pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
+ fatalOnErr(err)
+
+ putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
+ pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
+ fatalOnErr(err)
+
replicatorPoolSize := replicatorconfig.PoolSize(cfg)
+ if replicatorPoolSize <= 0 {
+ replicatorPoolSize = putRemoteCapacity
+ }
+
pool.replication, err = ants.NewPool(replicatorPoolSize)
fatalOnErr(err)
return pool
}
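// Usage sketch (not from this file): pools built with
// ants.WithNonblocking(true) fail fast instead of parking the caller,
// so Submit reports ants.ErrPoolOverload once all workers are busy:
//
//	err := pool.putRemote.Submit(func() {
//		// process one remote PUT
//	})
//	if errors.Is(err, ants.ErrPoolOverload) {
//		// all putRemoteCapacity workers are busy; the caller must shed load
//	}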
-func (c *cfg) LocalNodeInfo() *netmap.NodeInfo {
- var res netmap.NodeInfo
+func (c *cfg) LocalNodeInfo() (*netmapV2.NodeInfo, error) {
+ var res netmapV2.NodeInfo
+
ni, ok := c.cfgNetmap.state.getNodeInfo()
if ok {
- res = ni
+ ni.WriteToV2(&res)
} else {
- res = c.cfgNodeInfo.localInfo
+ c.cfgNodeInfo.localInfo.WriteToV2(&res)
}
- return &res
+
+ return &res, nil
}
// setContractNodeInfo rewrites local node info from the FrostFS network map.
@@ -1198,12 +1152,12 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
c.cfgNetmap.state.setNodeInfo(ni)
}
-func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
- ni, err := c.netmapLocalNodeState(ctx, epoch)
+func (c *cfg) updateContractNodeInfo(epoch uint64) {
+ ni, err := c.netmapLocalNodeState(epoch)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
+ c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
- zap.Error(err))
+ zap.String("error", err.Error()))
return
}
@@ -1213,37 +1167,42 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
-func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
+func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo
- ni.SetStatus(state)
+ stateSetter(&ni)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
- return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
+ return c.cfgNetmap.wrapper.AddPeer(prm)
}
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
-func bootstrapOnline(ctx context.Context, c *cfg) error {
- return c.bootstrapWithState(ctx, netmap.Online)
+func bootstrapOnline(c *cfg) error {
+ return c.bootstrapWithState((*netmap.NodeInfo).SetOnline)
}
// bootstrap calls bootstrapWithState with:
// - "maintenance" state if maintenance is in progress on the current node
// - "online", otherwise
-func (c *cfg) bootstrap(ctx context.Context) error {
+func (c *cfg) bootstrap() error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
- c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
- return c.bootstrapWithState(ctx, netmap.Maintenance)
+ c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
+ return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
}
- c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
+ c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
- return bootstrapOnline(ctx, c)
+ return bootstrapOnline(c)
+}
+
+// needBootstrap checks whether the local node should be registered in the network on bootup.
+func (c *cfg) needBootstrap() bool {
+ return c.cfgNetmap.needBootstrap
}
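// Hypothetical call-site sketch: a relay-only node (needBootstrap() is
// false) never announces itself, while a storage node registers its
// state on bootup. The real wiring lives in the netmap initialization:
//
//	if c.needBootstrap() {
//		fatalOnErr(c.bootstrap())
//	}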
type dCmp struct {
@@ -1263,19 +1222,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
- c.shutdown(ctx)
+ c.shutdown()
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
- c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
+ c.log.Warn(logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
- c.shutdown(ctx)
+ c.shutdown()
- c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
+ c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
default:
// block until any signal is received
@@ -1283,19 +1242,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case <-sighupCh:
c.reloadConfig(ctx)
case <-ch:
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
- c.shutdown(ctx)
+ c.shutdown()
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
- c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
+ c.log.Warn(logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
- c.shutdown(ctx)
+ c.shutdown()
- c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
+ c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
}
}
@@ -1303,74 +1262,64 @@ func (c *cfg) signalWatcher(ctx context.Context) {
}
func (c *cfg) reloadConfig(ctx context.Context) {
- c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+ c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
- if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
- c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
+ if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
+ c.log.Info(logs.FrostFSNodeSIGHUPSkip)
return
}
- defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
+ defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
err := c.reloadAppConfig()
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
// all the components are expected to support
// Logger's dynamic reconfiguration approach
- components := c.getComponents(ctx)
+ // Logger
- // Object
- c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
+ logPrm, err := c.loggerPrm()
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
+ return
+ }
+
+ components := c.getComponents(ctx, logPrm)
// Storage Engine
var rcfg engine.ReConfiguration
for _, optsWithID := range c.shardOpts(ctx) {
- rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts,
- shard.WithTombstoneSource(c.createTombstoneSource()),
- shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)),
- ))
+ rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
}
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
return
}
for _, component := range components {
err = component.reloadFunc()
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
+ c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
zap.String("component", component.name),
zap.Error(err))
}
}
- if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
- c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
- return
- }
-
- c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
-func (c *cfg) getComponents(ctx context.Context) []dCmp {
+func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
var components []dCmp
- components = append(components, dCmp{"logger", func() error {
- prm, err := c.loggerPrm()
- if err != nil {
- return err
- }
- logger.UpdateLevelForTags(prm)
- return nil
- }})
+ components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
- setRuntimeParameters(ctx, c)
+ setRuntimeParameters(c)
return nil
}})
components = append(components, dCmp{"audit", func() error {
@@ -1385,16 +1334,10 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
}
updated, err := tracing.Setup(ctx, *traceConfig)
if updated {
- c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
+ c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
- if c.treeService != nil {
- components = append(components, dCmp{"tree", func() error {
- c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys())
- return nil
- }})
- }
if cmp, updated := metricsComponent(c); updated {
if cmp.enabled {
cmp.preReload = enableMetricsSvc
@@ -1407,13 +1350,17 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
}
- components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
-
return components
}
func (c *cfg) reloadPools() error {
- newSize := replicatorconfig.PoolSize(c.appCfg)
+ newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
+ c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
+
+ newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
+ c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
+
+ newSize = replicatorconfig.PoolSize(c.appCfg)
c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
return nil
@@ -1422,7 +1369,7 @@ func (c *cfg) reloadPools() error {
func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
oldSize := p.Cap()
if oldSize != newSize {
- c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
+ c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
zap.Int("old", oldSize), zap.Int("new", newSize))
p.Tune(newSize)
}
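// ants.Pool.Tune resizes capacity in place: shrinking does not interrupt
// goroutines that are already running, it only lowers the admission
// limit for new tasks. A minimal illustration with arbitrary sizes:
//
//	p, _ := ants.NewPool(10)
//	p.Tune(4) // from now on at most 4 workers are admitted concurrently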
@@ -1447,25 +1394,14 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
return tombstoneSource
}
-func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
- return container.NewInfoProvider(func() (container.Source, error) {
- c.initMorphComponents(ctx)
- cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
- if err != nil {
- return nil, err
- }
- return containerClient.AsContainerSource(cc), nil
- })
-}
-
-func (c *cfg) shutdown(ctx context.Context) {
- old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
+func (c *cfg) shutdown() {
+ old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
- c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
+ c.log.Info(logs.FrostFSNodeShutdownSkip)
return
}
if old == control.HealthStatus_STARTING {
- c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
+ c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
}
c.ctxCancel()
@@ -1475,6 +1411,6 @@ func (c *cfg) shutdown(ctx context.Context) {
}
if err := sdnotify.ClearStatus(); err != nil {
- c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
+ c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go
index c40bf3620..36e53ea7c 100644
--- a/cmd/frostfs-node/config/calls.go
+++ b/cmd/frostfs-node/config/calls.go
@@ -1,7 +1,6 @@
package config
import (
- "slices"
"strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used
// to provide default values.
func (x *Config) SetDefault(from *Config) {
- x.defaultPath = slices.Clone(from.path)
+ x.defaultPath = make([]string, len(from.path))
+ copy(x.defaultPath, from.path)
}
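// The manual copy above matches slices.Clone except for one edge case
// that does not matter for defaultPath: Clone(nil) returns nil, while
// make+copy always yields a non-nil (possibly empty) slice.
//
//	var nilPath []string
//	cloned := make([]string, len(nilPath))
//	copy(cloned, nilPath)
//	// cloned != nil here, whereas slices.Clone(nilPath) == nil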
diff --git a/cmd/frostfs-node/config/calls_test.go b/cmd/frostfs-node/config/calls_test.go
index bc149eb7d..68bf1c679 100644
--- a/cmd/frostfs-node/config/calls_test.go
+++ b/cmd/frostfs-node/config/calls_test.go
@@ -1,6 +1,7 @@
package config_test
import (
+ "os"
"strings"
"testing"
@@ -37,7 +38,8 @@ func TestConfigEnv(t *testing.T) {
envName := strings.ToUpper(
strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
- t.Setenv(envName, value)
+ err := os.Setenv(envName, value)
+ require.NoError(t, err)
c := configtest.EmptyConfig()
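// Unlike t.Setenv, os.Setenv leaks into tests that run afterwards; if
// isolation matters, a manual cleanup restores the previous value
// (sketch, not part of the patch):
//
//	old, had := os.LookupEnv(envName)
//	require.NoError(t, os.Setenv(envName, value))
//	t.Cleanup(func() {
//		if had {
//			os.Setenv(envName, old)
//		} else {
//			os.Unsetenv(envName)
//		}
//	})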
diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go
index ee9d4268b..35dae97d9 100644
--- a/cmd/frostfs-node/config/configdir_test.go
+++ b/cmd/frostfs-node/config/configdir_test.go
@@ -12,10 +12,13 @@ import (
func TestConfigDir(t *testing.T) {
dir := t.TempDir()
- cfgFileName := path.Join(dir, "cfg_01.yml")
+ cfgFileName0 := path.Join(dir, "cfg_00.json")
+ cfgFileName1 := path.Join(dir, "cfg_01.yml")
- require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
+ require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
+ require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
+ require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
}
diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go
deleted file mode 100644
index 1cd64a6f8..000000000
--- a/cmd/frostfs-node/config/container/container.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package containerconfig
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-
-const (
- subsection = "container"
- listStreamSubsection = "list_stream"
-
- // ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
- ContainerBatchSizeDefault = 1000
-)
-
-// ContainerBatchSize returns the value of "batch_size" config parameter
-// from "list_stream" subsection of "container" section.
-//
-// Returns ContainerBatchSizeDefault if the value is missing or if
-// the value is not positive integer.
-func ContainerBatchSize(c *config.Config) uint32 {
- if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
- return ContainerBatchSizeDefault
- }
- size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
- if size == 0 {
- return ContainerBatchSizeDefault
- }
- return size
-}
diff --git a/cmd/frostfs-node/config/container/container_test.go b/cmd/frostfs-node/config/container/container_test.go
deleted file mode 100644
index 744cd3295..000000000
--- a/cmd/frostfs-node/config/container/container_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package containerconfig_test
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestContainerSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- empty := configtest.EmptyConfig()
- require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
- })
-
- const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go
index 7994e7809..c944d1c58 100644
--- a/cmd/frostfs-node/config/engine/config.go
+++ b/cmd/frostfs-node/config/engine/config.go
@@ -11,6 +11,10 @@ import (
const (
subsection = "storage"
+
+ // ShardPoolSizeDefault is a default value of routine pool size per-shard to
+ // process object PUT operations in a storage engine.
+ ShardPoolSizeDefault = 20
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@@ -37,10 +41,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
c.Sub(si),
)
- if sc.Mode() == mode.Disabled {
- continue
- }
-
// Path for the blobstor can't be present in the default section, because different shards
// must have different paths, so if it is missing, the shard is not here.
// At the same time checking for "blobstor" section doesn't work proper
@@ -50,6 +50,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
}
(*config.Config)(sc).SetDefault(def)
+ if sc.Mode() == mode.Disabled {
+ continue
+ }
+
if err := f(sc); err != nil {
return err
}
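// Why the Disabled check sits after SetDefault (illustrative config, not
// one of the files shipped here): with
//
//	storage.shard.default.mode: disabled
//	storage.shard.0.blobstor.0.path: /srv/shard0
//
// sc.Mode() reports Disabled only once the defaults are merged in, so
// checking before SetDefault would wrongly treat shard 0 as enabled.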
@@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil
}
+// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
+//
+// Returns ShardPoolSizeDefault if the value is not a positive number.
+func ShardPoolSize(c *config.Config) uint32 {
+ v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
+ if v > 0 {
+ return v
+ }
+
+ return ShardPoolSizeDefault
+}
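// Behaviour sketch: a missing key and a zero or unparsable value both
// fall back to the default (values illustrative):
//
//	storage.shard_pool_size: 15  -> 15
//	storage.shard_pool_size: 0   -> ShardPoolSizeDefault (20)
//	(key absent)                 -> ShardPoolSizeDefault (20)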
+
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
//
// Returns 0 if the value is missing.
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 401c54edc..19ad0e7ac 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -14,28 +14,10 @@ import (
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
)
-func TestIterateShards(t *testing.T) {
- fileConfigTest := func(c *config.Config) {
- var res []string
- require.NoError(t,
- engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
- res = append(res, sc.Metabase().Path())
- return nil
- }))
- require.Equal(t, []string{"abc", "xyz"}, res)
- }
-
- const cfgDir = "./testdata/shards"
- configtest.ForEachFileType(cfgDir, fileConfigTest)
- configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
-}
-
func TestEngineSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
@@ -55,6 +37,7 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
+ require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
})
@@ -64,6 +47,7 @@ func TestEngineSection(t *testing.T) {
num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
+ require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@@ -76,7 +60,6 @@ func TestEngineSection(t *testing.T) {
ss := blob.Storages()
pl := sc.Pilorama()
gc := sc.GC()
- limits := sc.Limits()
switch num {
case 0:
@@ -101,11 +84,10 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, 100, meta.BoltDB().MaxBatchSize())
require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay())
- require.Equal(t, true, sc.Compression().Enabled)
- require.Equal(t, compression.LevelFastest, sc.Compression().Level)
- require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes)
- require.Equal(t, true, sc.Compression().EstimateCompressibility)
- require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold)
+ require.Equal(t, true, sc.Compress())
+ require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes())
+ require.Equal(t, true, sc.EstimateCompressibility())
+ require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold())
require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss))
@@ -136,86 +118,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
-
- readLimits := limits.ToConfig().Read
- writeLimits := limits.ToConfig().Write
- require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
- require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
- require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
- require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
- require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
- require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
- require.ElementsMatch(t, readLimits.Tags,
- []qos.IOTagConfig{
- {
- Tag: "internal",
- Weight: toPtr(20),
- ReservedOps: toPtr(1000),
- LimitOps: toPtr(0),
- },
- {
- Tag: "client",
- Weight: toPtr(70),
- ReservedOps: toPtr(10000),
- },
- {
- Tag: "background",
- Weight: toPtr(5),
- LimitOps: toPtr(10000),
- ReservedOps: toPtr(0),
- },
- {
- Tag: "writecache",
- Weight: toPtr(5),
- LimitOps: toPtr(25000),
- },
- {
- Tag: "policer",
- Weight: toPtr(5),
- LimitOps: toPtr(25000),
- Prohibited: true,
- },
- {
- Tag: "treesync",
- Weight: toPtr(5),
- LimitOps: toPtr(25),
- },
- })
- require.ElementsMatch(t, writeLimits.Tags,
- []qos.IOTagConfig{
- {
- Tag: "internal",
- Weight: toPtr(200),
- ReservedOps: toPtr(100),
- LimitOps: toPtr(0),
- },
- {
- Tag: "client",
- Weight: toPtr(700),
- ReservedOps: toPtr(1000),
- },
- {
- Tag: "background",
- Weight: toPtr(50),
- LimitOps: toPtr(1000),
- ReservedOps: toPtr(0),
- },
- {
- Tag: "writecache",
- Weight: toPtr(50),
- LimitOps: toPtr(2500),
- },
- {
- Tag: "policer",
- Weight: toPtr(50),
- LimitOps: toPtr(2500),
- },
- {
- Tag: "treesync",
- Weight: toPtr(50),
- LimitOps: toPtr(100),
- },
- })
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -238,9 +140,8 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, 200, meta.BoltDB().MaxBatchSize())
require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay())
- require.Equal(t, false, sc.Compression().Enabled)
- require.Equal(t, compression.LevelDefault, sc.Compression().Level)
- require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes)
+ require.Equal(t, false, sc.Compress())
+ require.Equal(t, []string(nil), sc.UncompressableContentTypes())
require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss))
@@ -271,17 +172,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
-
- readLimits := limits.ToConfig().Read
- writeLimits := limits.ToConfig().Write
- require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout)
- require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps)
- require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps)
- require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout)
- require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps)
- require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps)
- require.Equal(t, 0, len(readLimits.Tags))
- require.Equal(t, 0, len(writeLimits.Tags))
}
return nil
})
@@ -295,7 +185,3 @@ func TestEngineSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}
-
-func toPtr(v float64) *float64 {
- return &v
-}
diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
index b564d36f8..a51308b5b 100644
--- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
+++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
@@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
- return max(d, 0)
+ if d < 0 {
+ d = 0
+ }
+ return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
- return max(s, 0)
+ if s < 0 {
+ s = 0
+ }
+ return s
}
// NoSync returns the value of "no_sync" config parameter.
@@ -60,5 +66,8 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) PageSize() int {
s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
- return max(s, 0)
+ if s < 0 {
+ s = 0
+ }
+ return s
}
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index d42646da7..0620c9f63 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -4,11 +4,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
- limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
@@ -28,27 +26,42 @@ func From(c *config.Config) *Config {
return (*Config)(c)
}
-func (x *Config) Compression() compression.Config {
- cc := (*config.Config)(x).Sub("compression")
- if cc == nil {
- return compression.Config{}
- }
- return compression.Config{
- Enabled: config.BoolSafe(cc, "enabled"),
- UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"),
- Level: compression.Level(config.StringSafe(cc, "level")),
- EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"),
- EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc),
- }
+// Compress returns the value of "compress" config parameter.
+//
+// Returns false if the value is not a valid bool.
+func (x *Config) Compress() bool {
+ return config.BoolSafe(
+ (*config.Config)(x),
+ "compress",
+ )
+}
+
+// UncompressableContentTypes returns the value of "compression_exclude_content_types" config parameter.
+//
+// Returns nil if the value is missing or invalid.
+func (x *Config) UncompressableContentTypes() []string {
+ return config.StringSliceSafe(
+ (*config.Config)(x),
+ "compression_exclude_content_types")
+}
+
+// EstimateCompressibility returns the value of "compression_estimate_compressibility" config parameter.
+//
+// Returns false if the value is not a valid bool.
+func (x *Config) EstimateCompressibility() bool {
+ return config.BoolSafe(
+ (*config.Config)(x),
+ "compression_estimate_compressibility",
+ )
}
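// With the flat layout a shard section reads roughly like the sketch
// below (key names as defined in this file; values mirror the example
// config exercised by the tests):
//
//	compress: true
//	compression_exclude_content_types:
//	  - audio/*
//	  - video/*
//	compression_estimate_compressibility: true
//	compression_estimate_compressibility_threshold: 0.7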
-// EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter.
+// EstimateCompressibilityThreshold returns the value of "compression_estimate_compressibility_threshold" config parameter.
//
// Returns EstimateCompressibilityThresholdDefault if the value is not defined, not a valid float, or not in range [0.0; 1.0].
-func estimateCompressibilityThreshold(c *config.Config) float64 {
+func (x *Config) EstimateCompressibilityThreshold() float64 {
v := config.FloatOrDefault(
- c,
- "estimate_compressibility_threshold",
+ (*config.Config)(x),
+ "compression_estimate_compressibility_threshold",
EstimateCompressibilityThresholdDefault)
if v < 0.0 || v > 1.0 {
return EstimateCompressibilityThresholdDefault
@@ -112,14 +125,6 @@ func (x *Config) GC() *gcconfig.Config {
)
}
-// Limits returns "limits" subsection as a limitsconfig.Config.
-func (x *Config) Limits() *limitsconfig.Config {
- return limitsconfig.From(
- (*config.Config)(x).
- Sub("limits"),
- )
-}
-
// RefillMetabase returns the value of "resync_metabase" config parameter.
//
// Returns false if the value is not a valid bool.
diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go
deleted file mode 100644
index ccd1e0000..000000000
--- a/cmd/frostfs-node/config/engine/shard/limits/config.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package limits
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "github.com/spf13/cast"
-)
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
- return (*Config)(c)
-}
-
-// Config is a wrapper over the config section
-// which provides access to Shard's limits configurations.
-type Config config.Config
-
-func (x *Config) ToConfig() qos.LimiterConfig {
- result := qos.LimiterConfig{
- Read: x.read(),
- Write: x.write(),
- }
- panicOnErr(result.Validate())
- return result
-}
-
-func (x *Config) read() qos.OpConfig {
- return x.parse("read")
-}
-
-func (x *Config) write() qos.OpConfig {
- return x.parse("write")
-}
-
-func (x *Config) parse(sub string) qos.OpConfig {
- c := (*config.Config)(x).Sub(sub)
- var result qos.OpConfig
-
- if s := config.Int(c, "max_waiting_ops"); s > 0 {
- result.MaxWaitingOps = s
- } else {
- result.MaxWaitingOps = qos.NoLimit
- }
-
- if s := config.Int(c, "max_running_ops"); s > 0 {
- result.MaxRunningOps = s
- } else {
- result.MaxRunningOps = qos.NoLimit
- }
-
- if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
- result.IdleTimeout = s
- } else {
- result.IdleTimeout = qos.DefaultIdleTimeout
- }
-
- result.Tags = tags(c)
-
- return result
-}
-
-func tags(c *config.Config) []qos.IOTagConfig {
- c = c.Sub("tags")
- var result []qos.IOTagConfig
- for i := 0; ; i++ {
- tag := config.String(c, strconv.Itoa(i)+".tag")
- if tag == "" {
- return result
- }
-
- var tagConfig qos.IOTagConfig
- tagConfig.Tag = tag
-
- v := c.Value(strconv.Itoa(i) + ".weight")
- if v != nil {
- w, err := cast.ToFloat64E(v)
- panicOnErr(err)
- tagConfig.Weight = &w
- }
-
- v = c.Value(strconv.Itoa(i) + ".limit_ops")
- if v != nil {
- l, err := cast.ToFloat64E(v)
- panicOnErr(err)
- tagConfig.LimitOps = &l
- }
-
- v = c.Value(strconv.Itoa(i) + ".reserved_ops")
- if v != nil {
- r, err := cast.ToFloat64E(v)
- panicOnErr(err)
- tagConfig.ReservedOps = &r
- }
-
- v = c.Value(strconv.Itoa(i) + ".prohibited")
- if v != nil {
- r, err := cast.ToBoolE(v)
- panicOnErr(err)
- tagConfig.Prohibited = r
- }
-
- result = append(result, tagConfig)
- }
-}
-
-func panicOnErr(err error) {
- if err != nil {
- panic(err)
- }
-}
diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go
index 5d4e8f408..28671ca55 100644
--- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go
+++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go
@@ -52,7 +52,10 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
- return max(d, 0)
+ if d <= 0 {
+ d = 0
+ }
+ return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
- return max(s, 0)
+ if s <= 0 {
+ s = 0
+ }
+ return s
}
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.env b/cmd/frostfs-node/config/engine/testdata/shards.env
deleted file mode 100644
index 079789b0f..000000000
--- a/cmd/frostfs-node/config/engine/testdata/shards.env
+++ /dev/null
@@ -1,3 +0,0 @@
-FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
-FROSTFS_STORAGE_SHARD_1_MODE=disabled
-FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.json b/cmd/frostfs-node/config/engine/testdata/shards.json
deleted file mode 100644
index b3d6abe85..000000000
--- a/cmd/frostfs-node/config/engine/testdata/shards.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "storage.shard": {
- "0": {
- "metabase.path": "abc"
- },
- "1": {
- "mode": "disabled"
- },
- "2": {
- "metabase.path": "xyz"
- }
- }
-}
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.yaml b/cmd/frostfs-node/config/engine/testdata/shards.yaml
deleted file mode 100644
index bbbba3af8..000000000
--- a/cmd/frostfs-node/config/engine/testdata/shards.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-storage.shard:
- 0:
- metabase.path: abc
- 1:
- mode: disabled
- 2:
- metabase.path: xyz
diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go
index 20f373184..378b9d793 100644
--- a/cmd/frostfs-node/config/logger/config.go
+++ b/cmd/frostfs-node/config/logger/config.go
@@ -2,7 +2,6 @@ package loggerconfig
import (
"os"
- "strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
@@ -53,29 +52,6 @@ func Destination(c *config.Config) string {
return DestinationDefault
}
-// Timestamp returns the value of "timestamp" config parameter
-// from "logger" section.
-//
-// Returns false if the value isn't specified.
-func Timestamp(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "timestamp")
-}
-
-// Tags returns the value of "tags" config parameter from "logger" section.
-func Tags(c *config.Config) [][]string {
- var res [][]string
- sub := c.Sub(subsection).Sub("tags")
- for i := 0; ; i++ {
- s := sub.Sub(strconv.FormatInt(int64(i), 10))
- names := config.StringSafe(s, "names")
- if names == "" {
- break
- }
- res = append(res, []string{names, config.StringSafe(s, "level")})
- }
- return res
-}
-
// ToLokiConfig extracts loki config.
func ToLokiConfig(c *config.Config) loki.Config {
hostname, _ := os.Hostname()
diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go
index 796ad529e..3587a0ddb 100644
--- a/cmd/frostfs-node/config/logger/config_test.go
+++ b/cmd/frostfs-node/config/logger/config_test.go
@@ -13,7 +13,6 @@ func TestLoggerSection_Level(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
require.Equal(t, loggerconfig.LevelDefault, loggerconfig.Level(configtest.EmptyConfig()))
require.Equal(t, loggerconfig.DestinationDefault, loggerconfig.Destination(configtest.EmptyConfig()))
- require.Equal(t, false, loggerconfig.Timestamp(configtest.EmptyConfig()))
})
const path = "../../../../config/example/node"
@@ -21,10 +20,6 @@ func TestLoggerSection_Level(t *testing.T) {
fileConfigTest := func(c *config.Config) {
require.Equal(t, "debug", loggerconfig.Level(c))
require.Equal(t, "journald", loggerconfig.Destination(c))
- require.Equal(t, true, loggerconfig.Timestamp(c))
- tags := loggerconfig.Tags(c)
- require.Equal(t, "main, morph", tags[0][0])
- require.Equal(t, "debug", tags[0][1])
}
configtest.ForEachFileType(path, fileConfigTest)
diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go
index a9f774d18..1c536a0e2 100644
--- a/cmd/frostfs-node/config/morph/config.go
+++ b/cmd/frostfs-node/config/morph/config.go
@@ -30,12 +30,6 @@ const (
// FrostfsIDCacheSizeDefault is a default value of APE chain cache.
FrostfsIDCacheSizeDefault = 10_000
-
- // ContainerCacheSizeDefault represents the default size for the container cache.
- ContainerCacheSizeDefault = 100
-
- // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
- PollCandidatesTimeoutDefault = 20 * time.Second
)
var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@@ -109,18 +103,6 @@ func CacheTTL(c *config.Config) time.Duration {
return CacheTTLDefault
}
-// ContainerCacheSize returns the value of "container_cache_size" config parameter
-// from "morph" section.
-//
-// Returns 0 if the value is not positive integer.
-// Returns ContainerCacheSizeDefault if the value is missing.
-func ContainerCacheSize(c *config.Config) uint32 {
- if c.Sub(subsection).Value("container_cache_size") == nil {
- return ContainerCacheSizeDefault
- }
- return config.Uint32Safe(c.Sub(subsection), "container_cache_size")
-}
-
// SwitchInterval returns the value of "switch_interval" config parameter
// from "morph" section.
//
@@ -157,17 +139,3 @@ func FrostfsIDCacheSize(c *config.Config) uint32 {
}
return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size")
}
-
-// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter
-// from "morph" section.
-//
-// Returns PollCandidatesTimeoutDefault if the value is not positive duration.
-func NetmapCandidatesPollInterval(c *config.Config) time.Duration {
- v := config.DurationSafe(c.Sub(subsection).
- Sub("netmap").Sub("candidates"), "poll_interval")
- if v > 0 {
- return v
- }
-
- return PollCandidatesTimeoutDefault
-}
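
Both accessors deleted from this file follow the same default-on-miss convention: the *Safe helpers return the type's zero value when a key is absent, so any non-positive result falls back to the compile-time default. A sketch of that shape, with the 20-second default taken from the removed constant:

    package sketch

    import (
    	"time"

    	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
    )

    const pollCandidatesTimeoutDefault = 20 * time.Second

    // pollInterval mirrors the removed NetmapCandidatesPollInterval.
    func pollInterval(c *config.Config) time.Duration {
    	v := config.DurationSafe(c.Sub("morph").Sub("netmap").Sub("candidates"), "poll_interval")
    	if v > 0 {
    		return v
    	}
    	return pollCandidatesTimeoutDefault // missing, zero or negative all mean "use default"
    }
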
diff --git a/cmd/frostfs-node/config/multinet/config.go b/cmd/frostfs-node/config/multinet/config.go
deleted file mode 100644
index f598efc51..000000000
--- a/cmd/frostfs-node/config/multinet/config.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package multinet
-
-import (
- "strconv"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-const (
- subsection = "multinet"
-
- FallbackDelayDefault = 300 * time.Millisecond
-)
-
-// Enabled returns the value of "enabled" config parameter from "multinet" section.
-func Enabled(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "enabled")
-}
-
-type Subnet struct {
- Mask string
- SourceIPs []string
-}
-
-// Subnets returns the value of "subnets" config parameter from "multinet" section.
-func Subnets(c *config.Config) []Subnet {
- var result []Subnet
- sub := c.Sub(subsection).Sub("subnets")
- for i := 0; ; i++ {
- s := sub.Sub(strconv.FormatInt(int64(i), 10))
- mask := config.StringSafe(s, "mask")
- if mask == "" {
- break
- }
- sourceIPs := config.StringSliceSafe(s, "source_ips")
- result = append(result, Subnet{
- Mask: mask,
- SourceIPs: sourceIPs,
- })
- }
- return result
-}
-
-// Balancer returns the value of "balancer" config parameter from "multinet" section.
-func Balancer(c *config.Config) string {
- return config.StringSafe(c.Sub(subsection), "balancer")
-}
-
-// Restrict returns the value of "restrict" config parameter from "multinet" section.
-func Restrict(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "restrict")
-}
-
-// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section.
-func FallbackDelay(c *config.Config) time.Duration {
- fd := config.DurationSafe(c.Sub(subsection), "fallback_delay")
- if fd != 0 { // negative value means no fallback
- return fd
- }
- return FallbackDelayDefault
-}
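
Note the three-way semantics of the deleted FallbackDelay: a missing key reads as zero and yields the 300ms default, a positive value is used as-is, and a negative value passes through to mean "fallback disabled". A compact sketch of that contract:

    package sketch

    import (
    	"time"

    	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
    )

    const fallbackDelayDefault = 300 * time.Millisecond

    func fallbackDelay(c *config.Config) time.Duration {
    	fd := config.DurationSafe(c.Sub("multinet"), "fallback_delay")
    	if fd != 0 {
    		return fd // positive: use it; negative: deliberately disables fallback
    	}
    	return fallbackDelayDefault // DurationSafe returns 0 for an absent key
    }
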
diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go
deleted file mode 100644
index 5f7dc6d53..000000000
--- a/cmd/frostfs-node/config/multinet/config_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package multinet
-
-import (
- "testing"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestMultinetSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- empty := configtest.EmptyConfig()
- require.Equal(t, false, Enabled(empty))
- require.Equal(t, ([]Subnet)(nil), Subnets(empty))
- require.Equal(t, "", Balancer(empty))
- require.Equal(t, false, Restrict(empty))
- require.Equal(t, FallbackDelayDefault, FallbackDelay(empty))
- })
-
- const path = "../../../../config/example/node"
-
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, true, Enabled(c))
- require.Equal(t, []Subnet{
- {
- Mask: "192.168.219.174/24",
- SourceIPs: []string{
- "192.168.218.185",
- "192.168.219.185",
- },
- },
- {
- Mask: "10.78.70.74/24",
- SourceIPs: []string{
- "10.78.70.185",
- "10.78.71.185",
- },
- },
- }, Subnets(c))
- require.Equal(t, "roundrobin", Balancer(c))
- require.Equal(t, false, Restrict(c))
- require.Equal(t, 350*time.Millisecond, FallbackDelay(c))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go
index c50718c5f..4d063245b 100644
--- a/cmd/frostfs-node/config/node/config.go
+++ b/cmd/frostfs-node/config/node/config.go
@@ -3,9 +3,7 @@ package nodeconfig
import (
"fmt"
"io/fs"
- "iter"
"os"
- "slices"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
@@ -90,8 +88,12 @@ func Wallet(c *config.Config) *keys.PrivateKey {
type stringAddressGroup []string
-func (x stringAddressGroup) Addresses() iter.Seq[string] {
- return slices.Values(x)
+func (x stringAddressGroup) IterateAddresses(f func(string) bool) {
+ for i := range x {
+ if f(x[i]) {
+ break
+ }
+ }
}
func (x stringAddressGroup) NumberOfAddresses() int {
@@ -131,6 +133,14 @@ func Attributes(c *config.Config) (attrs []string) {
return
}
+// Relay returns the value of "relay" config parameter
+// from "node" section.
+//
+// Returns false if the value is not set.
+func Relay(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "relay")
+}
+
// PersistentSessions returns structure that provides access to "persistent_sessions"
// subsection of "node" section.
func PersistentSessions(c *config.Config) PersistentSessionsConfig {
@@ -188,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
//
// Returns PermDefault if the value is not a positive number.
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
- p := config.UintSafe(l.cfg, "perm")
+ p := config.UintSafe((*config.Config)(l.cfg), "perm")
if p == 0 {
p = PermDefault
}
@@ -200,15 +210,10 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
//
// Returns false if the value is not a boolean.
func (l PersistentPolicyRulesConfig) NoSync() bool {
- return config.BoolSafe(l.cfg, "no_sync")
+ return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
}
// CompatibilityMode returns true if the node should run in compatibility mode with previous versions.
func CompatibilityMode(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode")
}
-
-// LocodeDBPath returns path to LOCODE database.
-func LocodeDBPath(c *config.Config) string {
- return config.String(c.Sub(subsection), "locode_db_path")
-}
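
The stringAddressGroup hunk above swaps a Go 1.23 range-over-func iterator (iter.Seq via slices.Values) for a callback-based IterateAddresses, presumably because the iter package requires a newer toolchain than this change set targets. A self-contained sketch of how a caller consumes each shape; the group type is a stand-in:

    package main

    import (
    	"fmt"
    	"iter"
    	"slices"
    )

    type group []string

    // Callback style (the + side): returning true from f stops iteration.
    func (g group) IterateAddresses(f func(string) bool) {
    	for i := range g {
    		if f(g[i]) {
    			break
    		}
    	}
    }

    // Iterator style (the - side): requires Go 1.23+.
    func (g group) Addresses() iter.Seq[string] {
    	return slices.Values(g)
    }

    func main() {
    	g := group{"addr1", "addr2"}
    	g.IterateAddresses(func(a string) bool {
    		fmt.Println(a)
    		return false // false means "keep going"
    	})
    	for a := range g.Addresses() { // same traversal, iterator form
    		fmt.Println(a)
    	}
    }
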
diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go
index 9af1dc038..7b9adecf4 100644
--- a/cmd/frostfs-node/config/node/config_test.go
+++ b/cmd/frostfs-node/config/node/config_test.go
@@ -29,10 +29,12 @@ func TestNodeSection(t *testing.T) {
)
attribute := Attributes(empty)
+ relay := Relay(empty)
persisessionsPath := PersistentSessions(empty).Path()
persistatePath := PersistentState(empty).Path()
require.Empty(t, attribute)
+ require.Equal(t, false, relay)
require.Equal(t, "", persisessionsPath)
require.Equal(t, PersistentStatePathDefault, persistatePath)
})
@@ -43,6 +45,7 @@ func TestNodeSection(t *testing.T) {
key := Key(c)
addrs := BootstrapAddresses(c)
attributes := Attributes(c)
+ relay := Relay(c)
wKey := Wallet(c)
persisessionsPath := PersistentSessions(c).Path()
persistatePath := PersistentState(c).Path()
@@ -84,6 +87,8 @@ func TestNodeSection(t *testing.T) {
return false
})
+ require.Equal(t, true, relay)
+
require.Len(t, attributes, 2)
require.Equal(t, "Price:11", attributes[0])
require.Equal(t, "UN-LOCODE:RU MSK", attributes[1])
diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go
index c8c967d30..876dc3ef1 100644
--- a/cmd/frostfs-node/config/object/config.go
+++ b/cmd/frostfs-node/config/object/config.go
@@ -10,17 +10,14 @@ type PutConfig struct {
cfg *config.Config
}
-// GetConfig is a wrapper over "get" config section which provides access
-// to object get pipeline configuration of object service.
-type GetConfig struct {
- cfg *config.Config
-}
-
const (
subsection = "object"
putSubsection = "put"
- getSubsection = "get"
+
+ // PutPoolSizeDefault is the default size of the routine pool used to
+ // process object.Put requests in the object service.
+ PutPoolSizeDefault = 10
)
// Put returns structure that provides access to "put" subsection of
@@ -31,20 +28,31 @@ func Put(c *config.Config) PutConfig {
}
}
+// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeRemote() int {
+ v := config.Int(g.cfg, "remote_pool_size")
+ if v > 0 {
+ return int(v)
+ }
+
+ return PutPoolSizeDefault
+}
+
+// PoolSizeLocal returns the value of "local_pool_size" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeLocal() int {
+ v := config.Int(g.cfg, "local_pool_size")
+ if v > 0 {
+ return int(v)
+ }
+
+ return PutPoolSizeDefault
+}
+
// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if it is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
}
-
-// Get returns structure that provides access to "get" subsection of
-// "object" section.
-func Get(c *config.Config) GetConfig {
- return GetConfig{
- c.Sub(subsection).Sub(getSubsection),
- }
-}
-
-// Priority returns the value of "priority" config parameter.
-func (g GetConfig) Priority() []string {
- return config.StringSliceSafe(g.cfg, "priority")
-}
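
A usage sketch for the restored pool-size accessors; the config path here is an assumed location of the example config, and the expected values (100 remote, 200 local) come from the test below:

    package main

    import (
    	"fmt"

    	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
    	objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
    )

    func main() {
    	c := config.New("config/example/node.yaml", "", "") // assumed example path
    	put := objectconfig.Put(c)
    	fmt.Println(put.PoolSizeRemote(), put.PoolSizeLocal()) // 100 200 for the example config
    }
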
diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go
index 1c525ef55..e2bb105d9 100644
--- a/cmd/frostfs-node/config/object/config_test.go
+++ b/cmd/frostfs-node/config/object/config_test.go
@@ -13,6 +13,8 @@ func TestObjectSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
+ require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
+ require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
})
@@ -20,6 +22,8 @@ func TestObjectSection(t *testing.T) {
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
+ require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
+ require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
}
diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go
index 6c3e8adab..191694970 100644
--- a/cmd/frostfs-node/config/profiler/config.go
+++ b/cmd/frostfs-node/config/profiler/config.go
@@ -52,7 +52,7 @@ func Address(c *config.Config) string {
return AddressDefault
}
-// BlockRate returns the value of "block_rate" config parameter
+// BlockRates returns the value of "block_rate" config parameter
// from "pprof" section.
func BlockRate(c *config.Config) int {
s := c.Sub(subsection)
diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go
deleted file mode 100644
index 85f8180ed..000000000
--- a/cmd/frostfs-node/config/qos/config.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package qos
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-const (
- subsection = "qos"
- criticalSubSection = "critical"
- internalSubSection = "internal"
-)
-
-// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
-// parameter from "qos" section.
-//
-// Returns an empty list if not set.
-func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
- return authorizedKeys(c, criticalSubSection)
-}
-
-// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
-// parameter from "qos" section.
-//
-// Returns an empty list if not set.
-func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
- return authorizedKeys(c, internalSubSection)
-}
-
-func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
- strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
- pubs := make(keys.PublicKeys, 0, len(strKeys))
-
- for i := range strKeys {
- pub, err := keys.NewPublicKeyFromString(strKeys[i])
- if err != nil {
- panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
- }
-
- pubs = append(pubs, pub)
- }
-
- return pubs
-}
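
The deleted helper turns hex-encoded compressed public keys from config into keys.PublicKeys, panicking on a malformed entry. A standalone sketch of the same conversion; the sample key is taken from the test removed below:

    package main

    import (
    	"fmt"

    	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    )

    func main() {
    	strKeys := []string{
    		"035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
    	}
    	pubs := make(keys.PublicKeys, 0, len(strKeys))
    	for _, s := range strKeys {
    		pub, err := keys.NewPublicKeyFromString(s)
    		if err != nil {
    			panic(fmt.Errorf("invalid authorized key %s: %w", s, err))
    		}
    		pubs = append(pubs, pub)
    	}
    	fmt.Println(len(pubs)) // 1
    }
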
diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go
deleted file mode 100644
index b3b6019cc..000000000
--- a/cmd/frostfs-node/config/qos/config_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package qos
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-func TestQoSSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- empty := configtest.EmptyConfig()
-
- require.Empty(t, CriticalAuthorizedKeys(empty))
- require.Empty(t, InternalAuthorizedKeys(empty))
- })
-
- const path = "../../../../config/example/node"
-
- criticalPubs := make(keys.PublicKeys, 2)
- criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
- criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
-
- internalPubs := make(keys.PublicKeys, 2)
- internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
- internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
-
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
- require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go
index e954bf19d..0fbac935c 100644
--- a/cmd/frostfs-node/config/replicator/config.go
+++ b/cmd/frostfs-node/config/replicator/config.go
@@ -11,8 +11,6 @@ const (
// PutTimeoutDefault is a default timeout of object put request in replicator.
PutTimeoutDefault = 5 * time.Second
- // PoolSizeDefault is the default pool size for put requests in the replicator.
- PoolSizeDefault = 10
)
// PutTimeout returns the value of "put_timeout" config parameter
@@ -30,13 +28,6 @@ func PutTimeout(c *config.Config) time.Duration {
// PoolSize returns the value of "pool_size" config parameter
// from "replicator" section.
-//
-// Returns PoolSizeDefault if the value is a non-positive integer.
func PoolSize(c *config.Config) int {
- v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
- if v > 0 {
- return v
- }
-
- return PoolSizeDefault
+ return int(config.IntSafe(c.Sub(subsection), "pool_size"))
}
diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go
index 2aa490946..2129c01b4 100644
--- a/cmd/frostfs-node/config/replicator/config_test.go
+++ b/cmd/frostfs-node/config/replicator/config_test.go
@@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
- require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
+ require.Equal(t, 0, replicatorconfig.PoolSize(empty))
})
const path = "../../../../config/example/node"
diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go
deleted file mode 100644
index e0efdfde2..000000000
--- a/cmd/frostfs-node/config/rpc/config.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package rpcconfig
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-const (
- subsection = "rpc"
- limitsSubsection = "limits"
-)
-
-type LimitConfig struct {
- Methods []string
- MaxOps int64
-}
-
-// Limits returns the "limits" config from "rpc" section.
-func Limits(c *config.Config) []LimitConfig {
- c = c.Sub(subsection).Sub(limitsSubsection)
-
- var limits []LimitConfig
-
- for i := uint64(0); ; i++ {
- si := strconv.FormatUint(i, 10)
- sc := c.Sub(si)
-
- methods := config.StringSliceSafe(sc, "methods")
- if len(methods) == 0 {
- break
- }
-
- if sc.Value("max_ops") == nil {
- panic("no max operations for method group")
- }
-
- limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
- }
-
- return limits
-}
diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go
deleted file mode 100644
index a6365e19f..000000000
--- a/cmd/frostfs-node/config/rpc/config_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package rpcconfig
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestRPCSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- require.Empty(t, Limits(configtest.EmptyConfig()))
- })
-
- t.Run("correct config", func(t *testing.T) {
- const path = "../../../../config/example/node"
-
- fileConfigTest := func(c *config.Config) {
- limits := Limits(c)
- require.Len(t, limits, 2)
-
- limit0 := limits[0]
- limit1 := limits[1]
-
- require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
- require.Equal(t, limit0.MaxOps, int64(1000))
-
- require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
- require.Equal(t, limit1.MaxOps, int64(10000))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
- })
-
- t.Run("no max operations", func(t *testing.T) {
- const path = "testdata/no_max_ops"
-
- fileConfigTest := func(c *config.Config) {
- require.Panics(t, func() { _ = Limits(c) })
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
- })
-
- t.Run("zero max operations", func(t *testing.T) {
- const path = "testdata/zero_max_ops"
-
- fileConfigTest := func(c *config.Config) {
- limits := Limits(c)
- require.Len(t, limits, 2)
-
- limit0 := limits[0]
- limit1 := limits[1]
-
- require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
- require.Equal(t, limit0.MaxOps, int64(0))
-
- require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
- require.Equal(t, limit1.MaxOps, int64(10000))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
- })
-}
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env
deleted file mode 100644
index 2fed4c5bc..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env
+++ /dev/null
@@ -1,3 +0,0 @@
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json
deleted file mode 100644
index 6156aa71d..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "rpc": {
- "limits": [
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/PutSingle",
- "/neo.fs.v2.object.ObjectService/Put"
- ]
- },
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/Get"
- ],
- "max_ops": 10000
- }
- ]
- }
-}
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml
deleted file mode 100644
index e50b7ae93..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env
deleted file mode 100644
index ce7302b0b..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env
+++ /dev/null
@@ -1,4 +0,0 @@
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_0_MAX_OPS=0
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json
deleted file mode 100644
index 16a1c173f..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "rpc": {
- "limits": [
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/PutSingle",
- "/neo.fs.v2.object.ObjectService/Put"
- ],
- "max_ops": 0
- },
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/Get"
- ],
- "max_ops": 10000
- }
- ]
- }
-}
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml
deleted file mode 100644
index 525d768d4..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- max_ops: 0
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go
index e98c032f0..a93d7e648 100644
--- a/cmd/frostfs-node/config/test/config.go
+++ b/cmd/frostfs-node/config/test/config.go
@@ -11,6 +11,8 @@ import (
)
func fromFile(path string) *config.Config {
+ os.Clearenv() // env vars take priority over config files, so tests must start from a clean environment
+
return config.New(path, "", "")
}
@@ -38,6 +40,15 @@ func ForEachFileType(pref string, f func(*config.Config)) {
// ForEnvFileType creates config from `.env` file.
func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {
+ envs := os.Environ()
+ t.Cleanup(func() {
+ os.Clearenv()
+ for _, env := range envs {
+ keyValue := strings.SplitN(env, "=", 2) // bounded split: values may themselves contain '='
+ os.Setenv(keyValue[0], keyValue[1])
+ }
+ })
+
f(fromEnvFile(t, pref+".env"))
}
@@ -62,6 +73,7 @@ func loadEnv(t testing.TB, path string) {
v = strings.Trim(v, `"`)
- t.Setenv(k, v)
+ err = os.Setenv(k, v)
+ require.NoError(t, err, "can't set environment variable")
}
}
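
The restore loop above has to split os.Environ() entries back into key and value; a bounded split is essential because values may themselves contain '=' (hence SplitN in the hunk). A short illustration:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	env := "FROSTFS_X=a=b"            // a value containing '='
    	kv := strings.SplitN(env, "=", 2) // ["FROSTFS_X", "a=b"]
    	// plain strings.Split would yield three parts and truncate the value to "a"
    	fmt.Println(kv[0], kv[1])
    }
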
diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go
index 91ef669ee..8544c672c 100644
--- a/cmd/frostfs-node/config/tracing/config.go
+++ b/cmd/frostfs-node/config/tracing/config.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"os"
- "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
@@ -25,7 +24,6 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
Service: "frostfs-node",
InstanceID: getInstanceIDOrDefault(c),
Version: misc.Version,
- Attributes: make(map[string]string),
}
if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" {
@@ -40,30 +38,11 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
}
conf.ServerCaCertPool = certPool
}
-
- i := uint64(0)
- for ; ; i++ {
- si := strconv.FormatUint(i, 10)
- ac := c.Sub(subsection).Sub("attributes").Sub(si)
- k := config.StringSafe(ac, "key")
- if k == "" {
- break
- }
- v := config.StringSafe(ac, "value")
- if v == "" {
- return nil, fmt.Errorf("empty tracing attribute value for key %s", k)
- }
- if _, ok := conf.Attributes[k]; ok {
- return nil, fmt.Errorf("tracing attribute key %s defined more than once", k)
- }
- conf.Attributes[k] = v
- }
-
return conf, nil
}
func getInstanceIDOrDefault(c *config.Config) string {
- s := config.StringSliceSafe(c.Sub("node"), "addresses")
+ s := config.StringSlice(c.Sub("node"), "addresses")
if len(s) > 0 {
return s[0]
}
diff --git a/cmd/frostfs-node/config/tracing/config_test.go b/cmd/frostfs-node/config/tracing/config_test.go
deleted file mode 100644
index 8e485ca6e..000000000
--- a/cmd/frostfs-node/config/tracing/config_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package tracing
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "github.com/stretchr/testify/require"
-)
-
-func TestTracingSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- tc, err := ToTracingConfig(configtest.EmptyConfig())
- require.NoError(t, err)
- require.Equal(t, false, tc.Enabled)
- require.Equal(t, tracing.Exporter(""), tc.Exporter)
- require.Equal(t, "", tc.Endpoint)
- require.Equal(t, "frostfs-node", tc.Service)
- require.Equal(t, "", tc.InstanceID)
- require.Nil(t, tc.ServerCaCertPool)
- require.Empty(t, tc.Attributes)
- })
-
- const path = "../../../../config/example/node"
-
- fileConfigTest := func(c *config.Config) {
- tc, err := ToTracingConfig(c)
- require.NoError(t, err)
- require.Equal(t, true, tc.Enabled)
- require.Equal(t, tracing.OTLPgRPCExporter, tc.Exporter)
- require.Equal(t, "localhost", tc.Endpoint)
- require.Equal(t, "frostfs-node", tc.Service)
- require.Nil(t, tc.ServerCaCertPool)
- require.EqualValues(t, map[string]string{
- "key0": "value",
- "key1": "value",
- }, tc.Attributes)
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/tree/config.go b/cmd/frostfs-node/config/tree/config.go
index da877791e..8a8919999 100644
--- a/cmd/frostfs-node/config/tree/config.go
+++ b/cmd/frostfs-node/config/tree/config.go
@@ -10,8 +10,6 @@ import (
const (
subsection = "tree"
-
- SyncBatchSizeDefault = 1000
)
// TreeConfig is a wrapper over "tree" config section
@@ -76,17 +74,6 @@ func (c TreeConfig) SyncInterval() time.Duration {
return config.DurationSafe(c.cfg, "sync_interval")
}
-// SyncBatchSize returns the value of "sync_batch_size"
-// config parameter from the "tree" section.
-//
-// Returns `SyncBatchSizeDefault` if the config value is not specified.
-func (c TreeConfig) SyncBatchSize() int {
- if v := config.IntSafe(c.cfg, "sync_batch_size"); v > 0 {
- return int(v)
- }
- return SyncBatchSizeDefault
-}
-
// AuthorizedKeys parses and returns an array of "authorized_keys" config
// parameter from "tree" section.
//
diff --git a/cmd/frostfs-node/config/tree/config_test.go b/cmd/frostfs-node/config/tree/config_test.go
index 6628b8878..285ea0725 100644
--- a/cmd/frostfs-node/config/tree/config_test.go
+++ b/cmd/frostfs-node/config/tree/config_test.go
@@ -44,7 +44,6 @@ func TestTreeSection(t *testing.T) {
require.Equal(t, 32, treeSec.ReplicationWorkerCount())
require.Equal(t, 5*time.Second, treeSec.ReplicationTimeout())
require.Equal(t, time.Hour, treeSec.SyncInterval())
- require.Equal(t, 2000, treeSec.SyncBatchSize())
require.Equal(t, expectedKeys, treeSec.AuthorizedKeys())
}
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index bdb280d87..5a29aac76 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -5,10 +5,9 @@ import (
"context"
"net"
- containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
+ containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
@@ -18,7 +17,6 @@ import (
containerTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/container/grpc"
containerService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
containerMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph"
- containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
@@ -29,10 +27,10 @@ import (
func initContainerService(_ context.Context, c *cfg) {
// container wrapper that tries to invoke notary
// requests if chain is configured so
- wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
+ wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
fatalOnErr(err)
- c.cnrClient = wrap
+ c.shared.cnrClient = wrap
cnrSrc := cntClient.AsContainerSource(wrap)
@@ -43,12 +41,11 @@ func initContainerService(_ context.Context, c *cfg) {
fatalOnErr(err)
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
- if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
- frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
+ if cacheSize > 0 {
+ frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL)
}
- c.frostfsidClient = frostfsIDSubjectProvider
- c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
+ c.shared.frostfsidClient = frostfsIDSubjectProvider
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
@@ -57,10 +54,8 @@ func initContainerService(_ context.Context, c *cfg) {
service := containerService.NewSignService(
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
- newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
- containerService.NewSplitterService(
- c.cfgContainer.containerBatchSize, c.respSvc,
- containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
+ newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
+ containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
),
)
service = containerService.NewAuditService(service, c.log, c.audit)
@@ -68,15 +63,16 @@ func initContainerService(_ context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
containerGRPC.RegisterContainerServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(containerGRPC.ContainerService_ServiceDesc), server)
})
c.cfgObject.cfgLocalStorage.localStorage.SetContainerSource(cnrRdr)
}
func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc containerCore.Source) (*morphContainerReader, *morphContainerWriter) {
+ eACLFetcher := &morphEACLFetcher{
+ w: client,
+ }
+
cnrRdr := new(morphContainerReader)
cnrWrt := &morphContainerWriter{
@@ -84,51 +80,57 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
}
if c.cfgMorph.cacheTTL <= 0 {
+ c.cfgObject.eaclSource = eACLFetcher
+ cnrRdr.eacl = eACLFetcher
c.cfgObject.cnrSource = cnrSrc
cnrRdr.src = cnrSrc
cnrRdr.lister = client
} else {
// use RPC node as source of Container contract items (with caching)
- c.cfgObject.cnrSource = cnrSrc
- if c.cfgMorph.containerCacheSize > 0 {
- containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
+ cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL)
+ cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
- subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) {
- ev := e.(containerEvent.PutSuccess)
+ subscribeToContainerCreation(c, func(e event.Event) {
+ ev := e.(containerEvent.PutSuccess)
- // read owner of the created container in order to update the reading cache.
- // TODO: use owner directly from the event once neofs-contract#256 is resolved,
- // but don't forget the benefit of reading the new container and caching it:
- // creation success is most commonly tracked by polling the GET op.
- cnr, err := cnrSrc.Get(ctx, ev.ID)
- if err == nil {
- containerCache.containerCache.set(ev.ID, cnr, nil)
- } else {
- // unlike removal, we expect to successfully receive the container
- // after a successful creation, so logging can be useful
- c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
- zap.Stringer("id", ev.ID),
- zap.Error(err),
- )
- }
-
- c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt,
+ // read owner of the created container in order to update the reading cache.
+ // TODO: use owner directly from the event once neofs-contract#256 is resolved,
+ // but don't forget the benefit of reading the new container and caching it:
+ // creation success is most commonly tracked by polling the GET op.
+ cnr, err := cnrSrc.Get(ev.ID)
+ if err == nil {
+ cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
+ } else {
+ // unlike removal, we expect to successfully receive the container
+ // after a successful creation, so logging can be useful
+ c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
zap.Stringer("id", ev.ID),
+ zap.Error(err),
)
- })
+ }
- subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
- ev := e.(containerEvent.DeleteSuccess)
- containerCache.handleRemoval(ev.ID)
- c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
- c.cfgObject.cnrSource = containerCache
- }
+ c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
+ zap.Stringer("id", ev.ID),
+ )
+ })
+
+ subscribeToContainerRemoval(c, func(e event.Event) {
+ ev := e.(containerEvent.DeleteSuccess)
+ cachedContainerStorage.handleRemoval(ev.ID)
+ c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+ zap.Stringer("id", ev.ID),
+ )
+ })
+
+ c.cfgObject.eaclSource = cachedEACLStorage
+ c.cfgObject.cnrSource = cachedContainerStorage
cnrRdr.lister = client
+ cnrRdr.eacl = c.cfgObject.eaclSource
cnrRdr.src = c.cfgObject.cnrSource
+
+ cnrWrt.cacheEnabled = true
+ cnrWrt.eacls = cachedEACLStorage
}
return cnrRdr, cnrWrt
@@ -218,38 +220,42 @@ func (c *cfg) ExternalAddresses() []string {
// implements the interface required by the container service provided by the morph executor.
type morphContainerReader struct {
+ eacl containerCore.EACLSource
+
src containerCore.Source
lister interface {
- ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
- IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
+ ContainersOf(*user.ID) ([]cid.ID, error)
}
}
-func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) {
- return x.src.Get(ctx, id)
+func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
+ return x.src.Get(id)
}
-func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) {
- return x.src.DeletionInfo(ctx, id)
+func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
+ return x.src.DeletionInfo(id)
}
-func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) {
- return x.lister.ContainersOf(ctx, id)
+func (x *morphContainerReader) GetEACL(id cid.ID) (*containerCore.EACL, error) {
+ return x.eacl.GetEACL(id)
}
-func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error {
- return x.lister.IterateContainersOf(ctx, id, processCID)
+func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
+ return x.lister.ContainersOf(id)
}
type morphContainerWriter struct {
neoClient *cntClient.Client
+
+ cacheEnabled bool
+ eacls ttlEACLStorage
}
-func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) {
- return cntClient.Put(ctx, m.neoClient, cnr)
+func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
+ return cntClient.Put(m.neoClient, cnr)
}
-func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error {
- return cntClient.Delete(ctx, m.neoClient, witness)
+func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error {
+ return cntClient.Delete(m.neoClient, witness)
}
diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go
index 1825013c7..e1e6e3ac9 100644
--- a/cmd/frostfs-node/control.go
+++ b/cmd/frostfs-node/control.go
@@ -7,19 +7,16 @@ import (
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
- metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
- tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"go.uber.org/zap"
"google.golang.org/grpc"
)
const serviceNameControl = "control"
-func initControlService(ctx context.Context, c *cfg) {
+func initControlService(c *cfg) {
endpoint := controlconfig.GRPC(c.appCfg).Endpoint()
if endpoint == controlconfig.GRPCEndpointDefault {
return
@@ -49,28 +46,21 @@ func initControlService(ctx context.Context, c *cfg) {
lis, err := net.Listen("tcp", endpoint)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}
- c.cfgControlService.server = grpc.NewServer(
- grpc.ChainUnaryInterceptor(
- qos.NewSetCriticalIOTagUnaryServerInterceptor(),
- metrics.NewUnaryServerInterceptor(),
- tracing.NewUnaryServerInterceptor(),
- ),
- // control service has no stream methods, so no stream interceptors added
- )
+ c.cfgControlService.server = grpc.NewServer()
c.onShutdown(func() {
- stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
+ stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log)
})
control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
- c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", serviceNameControl),
zap.String("endpoint", endpoint))
fatalOnErr(c.cfgControlService.server.Serve(lis))
@@ -82,23 +72,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {
return c.cfgNetmap.state.controlNetmapStatus()
}
-func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) {
- c.notifySystemd(ctx, st)
+func (c *cfg) setHealthStatus(st control.HealthStatus) {
+ c.notifySystemd(st)
c.healthStatus.Store(int32(st))
c.metricsCollector.State().SetHealth(int32(st))
}
-func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
+func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
- c.notifySystemd(ctx, newSt)
+ c.notifySystemd(newSt)
c.metricsCollector.State().SetHealth(int32(newSt))
}
return
}
-func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) {
+func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) {
old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
- c.notifySystemd(ctx, st)
+ c.notifySystemd(st)
c.metricsCollector.State().SetHealth(int32(st))
return
}
@@ -107,7 +97,7 @@ func (c *cfg) HealthStatus() control.HealthStatus {
return control.HealthStatus(c.healthStatus.Load())
}
-func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
+func (c *cfg) notifySystemd(st control.HealthStatus) {
if !c.sdNotify {
return
}
@@ -123,6 +113,6 @@ func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
- c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
+ c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go
index d2d4e9785..fb55a6019 100644
--- a/cmd/frostfs-node/frostfsid.go
+++ b/cmd/frostfs-node/frostfsid.go
@@ -1,8 +1,6 @@
package main
import (
- "context"
- "strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
@@ -11,101 +9,57 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
)
-type subjectWithError struct {
- subject *client.Subject
- err error
-}
-
-type subjectExtWithError struct {
- subject *client.SubjectExtended
- err error
-}
-
type morphFrostfsIDCache struct {
subjProvider frostfsidcore.SubjectProvider
- subjCache *expirable.LRU[util.Uint160, subjectWithError]
+ subjCache *expirable.LRU[util.Uint160, *client.Subject]
- subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError]
-
- metrics cacheMetrics
+ subjExtCache *expirable.LRU[util.Uint160, *client.SubjectExtended]
}
-func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider {
+func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration) frostfsidcore.SubjectProvider {
return &morphFrostfsIDCache{
subjProvider: subjProvider,
- subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, ttl),
+ subjCache: expirable.NewLRU(size, func(util.Uint160, *client.Subject) {}, ttl),
- subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl),
-
- metrics: metrics,
+ subjExtCache: expirable.NewLRU(size, func(util.Uint160, *client.SubjectExtended) {}, ttl),
}
}
-func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit)
- }()
-
+func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
result, found := m.subjCache.Get(addr)
if found {
- hit = true
- return result.subject, result.err
+ return result, nil
}
- subj, err := m.subjProvider.GetSubject(ctx, addr)
+ result, err := m.subjProvider.GetSubject(addr)
if err != nil {
- if m.isCacheableError(err) {
- m.subjCache.Add(addr, subjectWithError{
- err: err,
- })
- }
return nil, err
}
- m.subjCache.Add(addr, subjectWithError{subject: subj})
- return subj, nil
+ m.subjCache.Add(addr, result)
+ return result, nil
}
-func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit)
- }()
-
- result, found := m.subjExtCache.Get(addr)
+func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
+ subjExt, found := m.subjExtCache.Get(addr)
if found {
- hit = true
- return result.subject, result.err
+ return subjExt, nil
}
- subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr)
+ var err error
+ subjExt, err = m.subjProvider.GetSubjectExtended(addr)
if err != nil {
- if m.isCacheableError(err) {
- m.subjExtCache.Add(addr, subjectExtWithError{
- err: err,
- })
- m.subjCache.Add(addr, subjectWithError{
- err: err,
- })
- }
return nil, err
}
- m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt})
- m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)})
+ m.subjExtCache.Add(addr, subjExt)
+ m.subjCache.Add(addr, subjectFromSubjectExtended(subjExt))
return subjExt, nil
}
-func (m *morphFrostfsIDCache) isCacheableError(err error) bool {
- return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage)
-}
-
func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject {
return &client.Subject{
PrimaryKey: subjExt.PrimaryKey,
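
The - side of this file caches the error alongside the subject (subjectWithError), so a "subject not found" answer is also remembered for the TTL instead of hitting the chain on every call; the + side caches successes only. A sketch of value-or-error caching with the same expirable LRU used here (import path assumed to be hashicorp's golang-lru/v2; the real code caches only not-found errors):

    package main

    import (
    	"errors"
    	"fmt"
    	"time"

    	"github.com/hashicorp/golang-lru/v2/expirable"
    )

    type result struct {
    	val string
    	err error
    }

    func fetch(k int) (string, error) {
    	if k == 2 {
    		return "", errors.New("subject not found")
    	}
    	return "subject", nil
    }

    func main() {
    	cache := expirable.NewLRU[int, result](128, nil, time.Minute)

    	lookup := func(k int) (string, error) {
    		if r, ok := cache.Get(k); ok {
    			return r.val, r.err // hit: replay success or failure
    		}
    		val, err := fetch(k)
    		cache.Add(k, result{val: val, err: err}) // remember both outcomes
    		return val, err
    	}

    	fmt.Println(lookup(1)) // subject <nil>
    	fmt.Println(lookup(2)) // miss, then the error is cached
    	fmt.Println(lookup(2)) // replayed from cache for the TTL
    }
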
diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go
index 6b6d44750..3a38b2cca 100644
--- a/cmd/frostfs-node/grpc.go
+++ b/cmd/frostfs-node/grpc.go
@@ -1,22 +1,16 @@
package main
import (
- "context"
"crypto/tls"
"errors"
- "fmt"
"net"
"time"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
- rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@@ -24,11 +18,11 @@ import (
const maxRecvMsgSize = 256 << 20
-func initGRPC(ctx context.Context, c *cfg) {
+func initGRPC(c *cfg) {
var endpointsToReconnect []string
var successCount int
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
- serverOpts, ok := getGrpcServerOpts(ctx, c, sc)
+ serverOpts, ok := getGrpcServerOpts(c, sc)
if !ok {
return
}
@@ -36,7 +30,7 @@ func initGRPC(ctx context.Context, c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
- c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
return
}
@@ -45,7 +39,7 @@ func initGRPC(ctx context.Context, c *cfg) {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
- stopGRPC(ctx, "FrostFS Public API", srv, c.log)
+ stopGRPC("FrostFS Public API", srv, c.log)
})
c.cfgGRPC.append(sc.Endpoint(), lis, srv)
@@ -58,11 +52,11 @@ func initGRPC(ctx context.Context, c *cfg) {
c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg)
for _, endpoint := range endpointsToReconnect {
- scheduleReconnect(ctx, endpoint, c)
+ scheduleReconnect(endpoint, c)
}
}
-func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
+func scheduleReconnect(endpoint string, c *cfg) {
c.wg.Add(1)
go func() {
defer c.wg.Done()
@@ -71,7 +65,7 @@ func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
for {
select {
case <-t.C:
- if tryReconnect(ctx, endpoint, c) {
+ if tryReconnect(endpoint, c) {
return
}
case <-c.done:
@@ -81,20 +75,20 @@ func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
}()
}
-func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
- c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
+func tryReconnect(endpoint string, c *cfg) bool {
+ c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
- serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c)
+ serverOpts, found := getGRPCEndpointOpts(endpoint, c)
if !found {
- c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
+ c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
return true
}
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
- c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
- c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
+ c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+ c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
return false
}
c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@@ -102,16 +96,16 @@ func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
- stopGRPC(ctx, "FrostFS Public API", srv, c.log)
+ stopGRPC("FrostFS Public API", srv, c.log)
})
c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
- c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
+ c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
return true
}
-func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
+func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
unlock := c.LockAppConfigShared()
defer unlock()
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
@@ -122,7 +116,7 @@ func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result [
return
}
var ok bool
- result, ok = getGrpcServerOpts(ctx, c, sc)
+ result, ok = getGrpcServerOpts(c, sc)
if !ok {
return
}
@@ -131,20 +125,16 @@ func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result [
return
}
-func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
+func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
serverOpts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxRecvMsgSize),
grpc.ChainUnaryInterceptor(
- qos.NewUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(),
- qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
grpc.ChainStreamInterceptor(
- qos.NewStreamServerInterceptor(),
metrics.NewStreamServerInterceptor(),
tracing.NewStreamServerInterceptor(),
- qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
}
@@ -153,7 +143,7 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
if tlsCfg != nil {
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
return nil, false
}
@@ -184,38 +174,38 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
return serverOpts, true
}
-func serveGRPC(ctx context.Context, c *cfg) {
+func serveGRPC(c *cfg) {
c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) {
c.wg.Add(1)
go func() {
defer func() {
- c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint,
+ c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.Stringer("endpoint", l.Addr()),
)
c.wg.Done()
}()
- c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", "gRPC"),
zap.Stringer("endpoint", l.Addr()),
)
if err := s.Serve(l); err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
- c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
c.cfgGRPC.dropConnection(e)
- scheduleReconnect(ctx, e, c)
+ scheduleReconnect(e, c)
}
}()
})
}
-func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
- l = l.With(zap.String("name", name))
+func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
+ l = &logger.Logger{Logger: l.With(zap.String("name", name))}
- l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer)
+ l.Info(logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@@ -227,60 +217,9 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger
select {
case <-done:
case <-time.After(1 * time.Minute):
- l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
+ l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
- l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
-}
-
-func initRPCLimiter(c *cfg) error {
- var limits []limiting.KeyLimit
- for _, l := range rpcconfig.Limits(c.appCfg) {
- limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
- }
-
- if err := validateRPCLimits(c, limits); err != nil {
- return fmt.Errorf("validate RPC limits: %w", err)
- }
-
- limiter, err := limiting.NewSemaphoreLimiter(limits)
- if err != nil {
- return fmt.Errorf("create RPC limiter: %w", err)
- }
-
- c.cfgGRPC.limiter.Store(limiter)
- return nil
-}
-
-func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
- availableMethods := getAvailableMethods(c.cfgGRPC.servers)
- for _, limit := range limits {
- for _, method := range limit.Keys {
- if _, ok := availableMethods[method]; !ok {
- return fmt.Errorf("set limit on an unknown method %q", method)
- }
- }
- }
- return nil
-}
-
-func getAvailableMethods(servers []grpcServer) map[string]struct{} {
- res := make(map[string]struct{})
- for _, server := range servers {
- for _, method := range getMethodsForServer(server.Server) {
- res[method] = struct{}{}
- }
- }
- return res
-}
-
-func getMethodsForServer(server *grpc.Server) []string {
- var res []string
- for service, info := range server.GetServiceInfo() {
- for _, method := range info.Methods {
- res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
- }
- }
- return res
+ l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}
diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go
index 7346206ef..2ec20d848 100644
--- a/cmd/frostfs-node/httpcomponent.go
+++ b/cmd/frostfs-node/httpcomponent.go
@@ -20,9 +20,9 @@ type httpComponent struct {
preReload func(c *cfg)
}
-func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
+func (cmp *httpComponent) init(c *cfg) {
if !cmp.enabled {
- c.log.Info(ctx, cmp.name+" is disabled")
+ c.log.Info(cmp.name + " is disabled")
return
}
// Init server with parameters
@@ -39,14 +39,14 @@ func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
go func() {
defer c.wg.Done()
- c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", cmp.name),
zap.String("endpoint", cmp.address))
fatalOnErr(srv.Serve())
}()
c.closers = append(c.closers, closer{
cmp.name,
- func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) },
+ func() { stopAndLog(c, cmp.name, srv.Shutdown) },
})
}
@@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error {
// Cleanup
delCloser(cmp.cfg, cmp.name)
// Init server with new parameters
- cmp.init(ctx, cmp.cfg)
+ cmp.init(cmp.cfg)
// Start worker
if cmp.enabled {
startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index 0228d2a10..e4f0a434c 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -61,21 +61,21 @@ func main() {
var ctx context.Context
ctx, c.ctxCancel = context.WithCancel(context.Background())
- c.setHealthStatus(ctx, control.HealthStatus_STARTING)
+ c.setHealthStatus(control.HealthStatus_STARTING)
initApp(ctx, c)
bootUp(ctx, c)
- c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)
+ c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY)
wait(c)
}
-func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
- c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
+func initAndLog(c *cfg, name string, initializer func(*cfg)) {
+ c.log.Info(fmt.Sprintf("initializing %s service...", name))
initializer(c)
- c.log.Info(ctx, name+" service has been successfully initialized")
+ c.log.Info(name + " service has been successfully initialized")
}
func initApp(ctx context.Context, c *cfg) {
@@ -85,75 +85,72 @@ func initApp(ctx context.Context, c *cfg) {
c.wg.Done()
}()
- setRuntimeParameters(ctx, c)
+ setRuntimeParameters(c)
metrics, _ := metricsComponent(c)
- initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
- initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })
+ initAndLog(c, "profiler", initProfilerService)
+ initAndLog(c, metrics.name, metrics.init)
- initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })
+ initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initLocalStorage(ctx, c)
- initAndLog(ctx, c, "storage engine", func(c *cfg) {
+ initAndLog(c, "storage engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
})
- initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
- initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
- initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })
+ initAndLog(c, "gRPC", initGRPC)
+ initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAccessPolicyEngine(ctx, c)
- initAndLog(ctx, c, "access policy engine", func(c *cfg) {
+ initAndLog(c, "access policy engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
})
- initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
- initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
- initAndLog(ctx, c, "session", initSessionService)
- initAndLog(ctx, c, "object", initObjectService)
- initAndLog(ctx, c, "tree", initTreeService)
- initAndLog(ctx, c, "apemanager", initAPEManagerService)
- initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
+ initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
+ initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
+ initAndLog(c, "session", initSessionService)
+ initAndLog(c, "object", initObjectService)
+ initAndLog(c, "tree", initTreeService)
+ initAndLog(c, "apemanager", initAPEManagerService)
+ initAndLog(c, "control", initControlService)
- initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
-
- initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
+ initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
- c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
+ c.log.Info(fmt.Sprintf("starting %s service...", name))
starter(ctx, c)
if logSuccess {
- c.log.Info(ctx, name+" service started successfully")
+ c.log.Info(name + " service started successfully")
}
}
-func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
- c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))
+func stopAndLog(c *cfg, name string, stopper func() error) {
+ c.log.Debug(fmt.Sprintf("shutting down %s service", name))
- err := stopper(ctx)
+ err := stopper()
if err != nil {
- c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
- zap.Error(err),
+ c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
+ zap.String("error", err.Error()),
)
}
- c.log.Debug(ctx, name+" service has been stopped")
+ c.log.Debug(name + " service has been stopped")
}
func bootUp(ctx context.Context, c *cfg) {
- runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
+ runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)
- bootstrapNode(ctx, c)
+ bootstrapNode(c)
startWorkers(ctx, c)
}
func wait(c *cfg) {
- c.log.Info(context.Background(), logs.CommonApplicationStarted,
+ c.log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
<-c.done // graceful shutdown
@@ -163,12 +160,12 @@ func wait(c *cfg) {
go func() {
defer drain.Done()
for err := range c.internalErr {
- c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
+ c.log.Warn(logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
}
}()
- c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
+ c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
c.wg.Wait()
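
Aside: `wait` pairs a draining goroutine over the internal error channel with a `WaitGroup` join. A hedged standalone sketch of that drain-and-wait shape (channel capacity and the sample error are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	internalErr := make(chan error, 4)
	var wg sync.WaitGroup

	// a worker reports a non-fatal error, as node services do via c.internalErr
	wg.Add(1)
	go func() {
		defer wg.Done()
		internalErr <- errors.New("sample internal application error")
	}()

	var drain sync.WaitGroup
	drain.Add(1)
	go func() {
		defer drain.Done()
		for err := range internalErr {
			fmt.Println("internal application error:", err)
		}
	}()

	fmt.Println("waiting for all processes to stop")
	wg.Wait()
	close(internalErr) // no writers remain, so the drain loop can exit
	drain.Wait()
}
```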
diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go
index d9ca01e70..19b4af51f 100644
--- a/cmd/frostfs-node/metrics.go
+++ b/cmd/frostfs-node/metrics.go
@@ -8,38 +8,38 @@ import (
func metricsComponent(c *cfg) (*httpComponent, bool) {
var updated bool
	// check if it has been initialized before
- if c.metrics == nil {
- c.metrics = new(httpComponent)
- c.metrics.cfg = c
- c.metrics.name = "metrics"
- c.metrics.handler = metrics.Handler()
+ if c.dynamicConfiguration.metrics == nil {
+ c.dynamicConfiguration.metrics = new(httpComponent)
+ c.dynamicConfiguration.metrics.cfg = c
+ c.dynamicConfiguration.metrics.name = "metrics"
+ c.dynamicConfiguration.metrics.handler = metrics.Handler()
updated = true
}
	// (re)initialize from the read configuration
enabled := metricsconfig.Enabled(c.appCfg)
- if enabled != c.metrics.enabled {
- c.metrics.enabled = enabled
+ if enabled != c.dynamicConfiguration.metrics.enabled {
+ c.dynamicConfiguration.metrics.enabled = enabled
updated = true
}
address := metricsconfig.Address(c.appCfg)
- if address != c.metrics.address {
- c.metrics.address = address
+ if address != c.dynamicConfiguration.metrics.address {
+ c.dynamicConfiguration.metrics.address = address
updated = true
}
dur := metricsconfig.ShutdownTimeout(c.appCfg)
- if dur != c.metrics.shutdownDur {
- c.metrics.shutdownDur = dur
+ if dur != c.dynamicConfiguration.metrics.shutdownDur {
+ c.dynamicConfiguration.metrics.shutdownDur = dur
updated = true
}
- return c.metrics, updated
+ return c.dynamicConfiguration.metrics, updated
}
func enableMetricsSvc(c *cfg) {
- c.metricsSvc.Enable()
+ c.shared.metricsSvc.Enable()
}
func disableMetricsSvc(c *cfg) {
- c.metricsSvc.Disable()
+ c.shared.metricsSvc.Disable()
}
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 917cf6fc0..7178cd97d 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -14,7 +14,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -24,56 +23,12 @@ import (
const (
newEpochNotification = "NewEpoch"
+
+	// number of tries (blocks) before notary deposit timeout.
+ notaryDepositRetriesAmount = 300
)
-func (c *cfg) initMorphComponents(ctx context.Context) {
- c.cfgMorph.guard.Lock()
- defer c.cfgMorph.guard.Unlock()
- if c.cfgMorph.initialized {
- return
- }
- initMorphClient(ctx, c)
-
- lookupScriptHashesInNNS(c) // smart contract auto negotiation
-
- err := c.cfgMorph.client.EnableNotarySupport(
- client.WithProxyContract(
- c.cfgMorph.proxyScriptHash,
- ),
- )
- fatalOnErr(err)
-
- c.log.Info(ctx, logs.FrostFSNodeNotarySupport)
-
- wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
- fatalOnErr(err)
-
- var netmapSource netmap.Source
-
- c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg)
- c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
-
- if c.cfgMorph.cacheTTL == 0 {
- msPerBlock, err := c.cfgMorph.client.MsPerBlock()
- fatalOnErr(err)
- c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
- c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
- }
-
- if c.cfgMorph.cacheTTL < 0 {
- netmapSource = newRawNetmapStorage(wrap)
- } else {
- // use RPC node as source of netmap (with caching)
- netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg,
- morphconfig.NetmapCandidatesPollInterval(c.appCfg))
- }
-
- c.netMapSource = netmapSource
- c.cfgNetmap.wrapper = wrap
- c.cfgMorph.initialized = true
-}
-
-func initMorphClient(ctx context.Context, c *cfg) {
+func initMorphComponents(ctx context.Context, c *cfg) {
addresses := morphconfig.RPCEndpoint(c.appCfg)
// Morph client stable-sorts endpoints by priority. Shuffle here to randomize
@@ -85,7 +40,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
cli, err := client.New(ctx,
c.key,
client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)),
- client.WithLogger(c.log.WithTag(logger.TagMorph)),
+ client.WithLogger(c.log),
client.WithMetrics(c.metricsCollector.MorphClientMetrics()),
client.WithEndpoints(addresses...),
client.WithConnLostCallback(func() {
@@ -93,46 +48,90 @@ func initMorphClient(ctx context.Context, c *cfg) {
}),
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()),
- client.WithDialerSource(c.dialerSource),
)
if err != nil {
- c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
+ c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
fatalOnErr(err)
}
c.onShutdown(func() {
- c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
+ c.log.Info(logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})
if err := cli.SetGroupSignerScope(); err != nil {
- c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
+ c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}
c.cfgMorph.client = cli
+ c.cfgMorph.notaryEnabled = cli.ProbeNotary()
+
+ lookupScriptHashesInNNS(c) // smart contract auto negotiation
+
+ if c.cfgMorph.notaryEnabled {
+ err = c.cfgMorph.client.EnableNotarySupport(
+ client.WithProxyContract(
+ c.cfgMorph.proxyScriptHash,
+ ),
+ )
+ fatalOnErr(err)
+ }
+
+ c.log.Info(logs.FrostFSNodeNotarySupport,
+ zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
+ )
+
+ wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
+ fatalOnErr(err)
+
+ var netmapSource netmap.Source
+
+ c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
+
+ if c.cfgMorph.cacheTTL == 0 {
+ msPerBlock, err := c.cfgMorph.client.MsPerBlock()
+ fatalOnErr(err)
+ c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
+ c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
+ }
+
+ if c.cfgMorph.cacheTTL < 0 {
+ netmapSource = wrap
+ } else {
+ // use RPC node as source of netmap (with caching)
+ netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap)
+ }
+
+ c.netMapSource = netmapSource
+ c.cfgNetmap.wrapper = wrap
}
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
- tx, vub, err := makeNotaryDeposit(ctx, c)
+ // skip notary deposit in non-notary environments
+ if !c.cfgMorph.notaryEnabled {
+ return
+ }
+
+ tx, err := makeNotaryDeposit(c)
fatalOnErr(err)
if tx.Equals(util.Uint256{}) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
	// need to wait for it.
- c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
+ c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}
- err = waitNotaryDeposit(ctx, c, tx, vub)
+ err = waitNotaryDeposit(ctx, c, tx)
fatalOnErr(err)
}
-func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) {
+func makeNotaryDeposit(c *cfg) (util.Uint256, error) {
const (
// gasMultiplier defines how many times more the notary
// balance must be compared to the GAS balance of the node:
@@ -146,19 +145,43 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
depositAmount, err := client.CalculateNotaryDepositAmount(c.cfgMorph.client, gasMultiplier, gasDivisor)
if err != nil {
- return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err)
+ return util.Uint256{}, fmt.Errorf("could not calculate notary deposit: %w", err)
}
- return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount)
+ return c.cfgMorph.client.DepositEndlessNotary(depositAmount)
}
-func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
- if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
- return err
+var (
+ errNotaryDepositFail = errors.New("notary deposit tx has faulted")
+ errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network")
+)
+
+func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
+ for range notaryDepositRetriesAmount {
+ c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted)
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ ok, err := c.cfgMorph.client.TxHalt(tx)
+ if err == nil {
+ if ok {
+ c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
+ return nil
+ }
+
+ return errNotaryDepositFail
+ }
+
+ err = c.cfgMorph.client.Wait(ctx, 1)
+ if err != nil {
+ return fmt.Errorf("could not wait for one block in chain: %w", err)
+ }
}
- c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
- return nil
+ return errNotaryDepositTimeout
}
func listenMorphNotifications(ctx context.Context, c *cfg) {
@@ -166,23 +189,22 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
err error
subs subscriber.Subscriber
)
- log := c.log.WithTag(logger.TagMorph)
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+ c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
- Log: log,
+ Log: c.log,
StartFromBlock: fromSideChainBlock,
Client: c.cfgMorph.client,
})
fatalOnErr(err)
lis, err := event.NewListener(event.ListenerParams{
- Logger: log,
+ Logger: c.log,
Subscriber: subs,
})
fatalOnErr(err)
@@ -200,7 +222,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
- log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
+ c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}
@@ -210,12 +232,12 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers)
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
- registerBlockHandler(lis, func(ctx context.Context, block *block.Block) {
- log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
+ registerBlockHandler(lis, func(block *block.Block) {
+ c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
- log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
+ c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}
@@ -226,17 +248,27 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
subs map[event.Type][]event.Handler,
) {
for typ, handlers := range subs {
+ pi := event.NotificationParserInfo{}
+ pi.SetType(typ)
+ pi.SetScriptHash(scHash)
+
p, ok := parsers[typ]
if !ok {
panic(fmt.Sprintf("missing parser for event %s", typ))
}
- lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
- Contract: scHash,
- Type: typ,
- Parser: p,
- Handlers: handlers,
- })
+ pi.SetParser(p)
+
+ lis.SetNotificationParser(pi)
+
+ for _, h := range handlers {
+ hi := event.NotificationHandlerInfo{}
+ hi.SetType(typ)
+ hi.SetScriptHash(scHash)
+ hi.SetHandler(h)
+
+ lis.RegisterNotificationHandler(hi)
+ }
}
}
@@ -265,6 +297,10 @@ func lookupScriptHashesInNNS(c *cfg) {
)
for _, t := range targets {
+ if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
+ continue // ignore proxy contract if notary disabled
+ }
+
if emptyHash.Equals(*t.h) {
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
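
The reinstated `waitNotaryDeposit` above is a bounded block-polling loop: check the transaction once per block, succeed or fail as soon as it is visible, and time out after a fixed number of blocks. A simplified sketch of the same shape; the `chain` interface here is a hypothetical stand-in, not the real morph client API:

```go
package morphwait

import (
	"context"
	"errors"
)

type chain interface {
	TxHalt(tx [32]byte) (bool, error)               // true: persisted OK, false: faulted
	WaitBlocks(ctx context.Context, n uint32) error // block until n more blocks arrive
}

var (
	errDepositFault   = errors.New("notary deposit tx has faulted")
	errDepositTimeout = errors.New("notary deposit tx has not appeared in the network")
)

func waitDeposit(ctx context.Context, c chain, tx [32]byte, retries int) error {
	for range retries { // one attempt per block, as in the patch
		if err := ctx.Err(); err != nil {
			return err
		}
		ok, err := c.TxHalt(tx)
		if err == nil { // tx found: either persisted or faulted, stop waiting
			if ok {
				return nil
			}
			return errDepositFault
		}
		if err := c.WaitBlocks(ctx, 1); err != nil {
			return err
		}
	}
	return errDepositTimeout
}
```

Note the asymmetry the original also has: a `TxHalt` error is treated as "not visible yet" and retried, while only a successful lookup terminates the loop early.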
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 7dfb4fe12..8104b1dc1 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -8,7 +8,7 @@ import (
"net"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -19,7 +19,6 @@ import (
netmapTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/netmap/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
- netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"go.uber.org/zap"
@@ -62,15 +61,13 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
if ni != nil {
s.nodeInfo.Store(*ni)
- switch ni.Status() {
- case netmapSDK.Online:
+ switch {
+ case ni.IsOnline():
ctrlNetSt = control.NetmapStatus_ONLINE
- case netmapSDK.Offline:
+ case ni.IsOffline():
ctrlNetSt = control.NetmapStatus_OFFLINE
- case netmapSDK.Maintenance:
+ case ni.IsMaintenance():
ctrlNetSt = control.NetmapStatus_MAINTENANCE
- case netmapSDK.UnspecifiedState:
- ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED
}
} else {
ctrlNetSt = control.NetmapStatus_OFFLINE
@@ -81,13 +78,13 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
// nil ni means that the node is not included
// in the netmap
- niOld.SetStatus(netmapSDK.Offline)
+ niOld.SetOffline()
s.nodeInfo.Store(niOld)
}
}
- s.setControlNetmapStatus(ctrlNetSt)
+ s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
}
// sets the current node state to the given value. Subsequent cfg.bootstrap
@@ -105,7 +102,9 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) {
v := s.nodeInfo.Load()
if v != nil {
res, ok = v.(netmapSDK.NodeInfo)
- assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v))
+ if !ok {
+ panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v))
+ }
}
return
@@ -123,11 +122,7 @@ func nodeKeyFromNetmap(c *cfg) []byte {
func (c *cfg) iterateNetworkAddresses(f func(string) bool) {
ni, ok := c.cfgNetmap.state.getNodeInfo()
if ok {
- for s := range ni.NetworkEndpoints() {
- if f(s) {
- return
- }
- }
+ ni.IterateNetworkEndpoints(f)
}
}
@@ -144,11 +139,13 @@ func initNetmapService(ctx context.Context, c *cfg) {
network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo)
c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes())
parseAttributes(c)
- c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline)
+ c.cfgNodeInfo.localInfo.SetOffline()
- c.initMorphComponents(ctx)
+ if c.cfgMorph.client == nil {
+ initMorphComponents(ctx, c)
+ }
- initNetmapState(ctx, c)
+ initNetmapState(c)
server := netmapTransportGRPC.New(
netmapService.NewSignService(
@@ -169,52 +166,53 @@ func initNetmapService(ctx context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
netmapGRPC.RegisterNetmapServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(netmapGRPC.NetmapService_ServiceDesc), server)
})
addNewEpochNotificationHandlers(c)
}
func addNewEpochNotificationHandlers(c *cfg) {
- addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
+ addNewEpochNotificationHandler(c, func(ev event.Event) {
c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber())
})
- addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
+ addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
e := ev.(netmapEvent.NewEpoch).EpochNumber()
- c.updateContractNodeInfo(ctx, e)
+ c.updateContractNodeInfo(e)
- if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
+ if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
return
}
- if err := c.bootstrap(ctx); err != nil {
- c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
+ if err := c.bootstrap(); err != nil {
+ c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
})
- addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
- _, _, err := makeNotaryDeposit(ctx, c)
- if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
- zap.Error(err),
- )
- }
- })
+ if c.cfgMorph.notaryEnabled {
+ addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
+ _, err := makeNotaryDeposit(c)
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
+ zap.String("error", err.Error()),
+ )
+ }
+ })
+ }
}
// bootstrapNode adds current node to the Network map.
// Must be called after initNetmapService.
-func bootstrapNode(ctx context.Context, c *cfg) {
- if c.IsMaintenance() {
- c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
- return
+func bootstrapNode(c *cfg) {
+ if c.needBootstrap() {
+ if c.IsMaintenance() {
+ c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
+ return
+ }
+ err := c.bootstrap()
+ fatalOnErrDetails("bootstrap error", err)
}
- err := c.bootstrap(ctx)
- fatalOnErrDetails("bootstrap error", err)
}
func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) {
@@ -239,47 +237,46 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
-func initNetmapState(ctx context.Context, c *cfg) {
- epoch, err := c.cfgNetmap.wrapper.Epoch(ctx)
+func initNetmapState(c *cfg) {
+ epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo
- ni, err = c.netmapInitLocalNodeState(ctx, epoch)
+ ni, err = c.netmapInitLocalNodeState(epoch)
fatalOnErrDetails("could not init network state", err)
stateWord := nodeState(ni)
- c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState,
+ c.log.Info(logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)
- if ni != nil && ni.Status().IsMaintenance() {
+ if ni != nil && ni.IsMaintenance() {
c.isMaintenance.Store(true)
}
c.cfgNetmap.state.setCurrentEpoch(epoch)
+ c.cfgNetmap.startEpoch = epoch
c.setContractNodeInfo(ni)
}
func nodeState(ni *netmapSDK.NodeInfo) string {
if ni != nil {
- switch ni.Status() {
- case netmapSDK.Online:
+ switch {
+ case ni.IsOnline():
return "online"
- case netmapSDK.Offline:
+ case ni.IsOffline():
return "offline"
- case netmapSDK.Maintenance:
+ case ni.IsMaintenance():
return "maintenance"
- case netmapSDK.UnspecifiedState:
- return "undefined"
}
}
return "undefined"
}
-func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
- nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx)
+func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
+ nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil {
return nil, err
}
@@ -292,7 +289,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
}
}
- node, err := c.netmapLocalNodeState(ctx, epoch)
+ node, err := c.netmapLocalNodeState(epoch)
if err != nil {
return nil, err
}
@@ -306,16 +303,16 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
- c.log.Info(ctx, logs.CandidateStatusPriority,
+ c.log.Info(logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}
return candidate, nil
}
-func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
+func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
// calculate current network state
- nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch)
+ nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
if err != nil {
return nil, err
}
@@ -350,29 +347,35 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) {
)
}
-func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
+var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode")
+
+func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
switch st {
default:
return fmt.Errorf("unsupported status %v", st)
case control.NetmapStatus_MAINTENANCE:
- return c.setMaintenanceStatus(ctx, false)
+ return c.setMaintenanceStatus(false)
case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE:
}
- c.stopMaintenance(ctx)
+ c.stopMaintenance()
+
+ if !c.needBootstrap() {
+ return errRelayBootstrap
+ }
if st == control.NetmapStatus_ONLINE {
c.cfgNetmap.reBoostrapTurnedOff.Store(false)
- return bootstrapOnline(ctx, c)
+ return bootstrapOnline(c)
}
c.cfgNetmap.reBoostrapTurnedOff.Store(true)
- return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
+ return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {})
}
-func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) {
- epoch, err := c.netMapSource.Epoch(ctx)
+func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
+ epoch, err := c.netMapSource.Epoch()
if err != nil {
return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
}
@@ -380,12 +383,12 @@ func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64
return st, epoch, nil
}
-func (c *cfg) ForceMaintenance(ctx context.Context) error {
- return c.setMaintenanceStatus(ctx, true)
+func (c *cfg) ForceMaintenance() error {
+ return c.setMaintenanceStatus(true)
}
-func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
- netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx)
+func (c *cfg) setMaintenanceStatus(force bool) error {
+ netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
} else if !netSettings.MaintenanceModeAllowed {
@@ -393,10 +396,10 @@ func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
}
if err == nil || force {
- c.startMaintenance(ctx)
+ c.startMaintenance()
if err == nil {
- err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance)
+ err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance)
}
if err != nil {
@@ -409,16 +412,13 @@ func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
// calls UpdatePeerState operation of Netmap contract's client for the local node.
// State setter is used to specify node state to switch to.
-func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error {
+func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error {
var prm nmClient.UpdatePeerPrm
prm.SetKey(c.key.PublicKey().Bytes())
stateSetter(&prm)
- res, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm)
- if err != nil {
- return err
- }
- return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
+ _, err := c.cfgNetmap.wrapper.UpdatePeerState(prm)
+ return err
}
type netInfo struct {
@@ -433,7 +433,7 @@ type netInfo struct {
msPerBlockRdr func() (int64, error)
}
-func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) {
+func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
magic, err := n.magic.MagicNumber()
if err != nil {
return nil, err
@@ -443,7 +443,7 @@ func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.Net
ni.SetCurrentEpoch(n.netState.CurrentEpoch())
ni.SetMagicNumber(magic)
- netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx)
+ netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
if err != nil {
return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
}
diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go
deleted file mode 100644
index e6be9cdf5..000000000
--- a/cmd/frostfs-node/netmap_source.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package main
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-type rawNetmapSource struct {
- client *netmapClient.Client
-}
-
-func newRawNetmapStorage(client *netmapClient.Client) netmap.Source {
- return &rawNetmapSource{
- client: client,
- }
-}
-
-func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
- nm, err := s.client.GetNetMap(ctx, diff)
- if err != nil {
- return nil, err
- }
- candidates, err := s.client.GetCandidates(ctx)
- if err != nil {
- return nil, err
- }
- updates := getNetMapNodesToUpdate(nm, candidates)
- if len(updates) > 0 {
- mergeNetmapWithCandidates(updates, nm)
- }
- return nm, nil
-}
-
-func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- nm, err := s.client.GetNetMapByEpoch(ctx, epoch)
- if err != nil {
- return nil, err
- }
- candidates, err := s.client.GetCandidates(ctx)
- if err != nil {
- return nil, err
- }
- updates := getNetMapNodesToUpdate(nm, candidates)
- if len(updates) > 0 {
- mergeNetmapWithCandidates(updates, nm)
- }
- return nm, nil
-}
-
-func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) {
- return s.client.Epoch(ctx)
-}
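
The deleted `rawNetmapSource` refreshed every fetched netmap with the current candidate list; its helpers `getNetMapNodesToUpdate` and `mergeNetmapWithCandidates` are not shown in this patch. The sketch below reimplements the core idea (match nodes by public key, prefer the candidate record) with SDK types only — an illustration of the presumed behavior, not the node's code:

```go
package netmaputil

import (
	"bytes"

	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// mergeWithCandidates overwrites netmap entries with fresher candidate
// records that carry the same public key.
func mergeWithCandidates(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) {
	nodes := nm.Nodes()
	for i := range nodes {
		for j := range candidates {
			if bytes.Equal(nodes[i].PublicKey(), candidates[j].PublicKey()) {
				nodes[i] = candidates[j]
				break
			}
		}
	}
	nm.SetNodes(nodes)
}
```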
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index c33c02b3f..610e2c363 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -2,9 +2,12 @@ package main
import (
"context"
+ "errors"
"fmt"
"net"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
@@ -13,9 +16,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@@ -31,9 +38,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -54,22 +60,22 @@ type objectSvc struct {
patch *patchsvc.Service
}
-func (c *cfg) MaxObjectSize(ctx context.Context) uint64 {
- sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx)
+func (c *cfg) MaxObjectSize() uint64 {
+ sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
- zap.Error(err),
+ c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
+ zap.String("error", err.Error()),
)
}
return sz
}
-func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) {
+func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
return s.put.Put()
}
-func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) {
+func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) {
return s.patch.Patch()
}
@@ -103,15 +109,16 @@ func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRe
type delNetInfo struct {
netmap.State
+ tsLifetime uint64
cfg *cfg
}
func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
- return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
+ return i.tsLifetime, nil
}
-// LocalNodeID returns node owner ID calculated from configured private key.
+// returns node owner ID calculated from configured private key.
//
// Implements method needed for Object.Delete service.
func (i *delNetInfo) LocalNodeID() user.ID {
@@ -122,8 +129,8 @@ type innerRingFetcherWithNotary struct {
sidechain *morphClient.Client
}
-func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) {
- keys, err := fn.sidechain.NeoFSAlphabetList(ctx)
+func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
+ keys, err := fn.sidechain.NeoFSAlphabetList()
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
}
@@ -136,6 +143,24 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]by
return result, nil
}
+type innerRingFetcherWithoutNotary struct {
+ nm *nmClient.Client
+}
+
+func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
+ keys, err := f.nm.GetInnerRingList()
+ if err != nil {
+ return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
+ }
+
+ result := make([][]byte, 0, len(keys))
+ for i := range keys {
+ result = append(result, keys[i].Bytes())
+ }
+
+ return result, nil
+}
+
func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
@@ -150,12 +175,11 @@ func initObjectService(c *cfg) {
sPutV2 := createPutSvcV2(sPut, keyStorage)
- sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
+ sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache)
sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
- sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource,
- c.ObjectCfg.priorityMetrics)
+ sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
*c.cfgObject.getSvc = *sGet // need smth better
@@ -168,14 +192,16 @@ func initObjectService(c *cfg) {
sPatch := createPatchSvc(sGet, sPut)
// build service pipeline
-	// grpc | audit | qos | <metrics> | signature | response | acl | ape | split
+	// grpc | audit | <metrics> | signature | response | acl | ape | split
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
- apeSvc := createAPEService(c, &irFetcher, splitSvc)
+ apeSvc := createAPEService(c, splitSvc)
+
+ aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
var commonSvc objectService.Common
- commonSvc.Init(&c.internals, apeSvc)
+ commonSvc.Init(&c.internals, aclSvc)
respSvc := objectService.NewResponseService(
&commonSvc,
@@ -187,23 +213,19 @@ func initObjectService(c *cfg) {
respSvc,
)
- c.metricsSvc = objectService.NewMetricCollector(
+ c.shared.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
- qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
- auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
+ auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit)
server := objectTransportGRPC.New(auditSvc)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
objectGRPC.RegisterObjectServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(objectGRPC.ObjectService_ServiceDesc), server)
})
}
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
if policerconfig.UnsafeDisable(c.appCfg) {
- c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
+ c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
return
}
@@ -214,12 +236,14 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr)
prm.WithForceRemoval()
- return ls.Inhume(ctx, prm)
+ _, err := ls.Inhume(ctx, prm)
+ return err
}
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
+
pol := policer.New(
- policer.WithLogger(c.log.WithTag(logger.TagPolicer)),
+ policer.WithLogger(c.log),
policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}),
policer.WithBuryFunc(buryFn),
policer.WithContainerSource(c.cfgObject.cnrSource),
@@ -263,9 +287,10 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr)
- if err := ls.Inhume(ctx, inhumePrm); err != nil {
- c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
- zap.Error(err),
+ _, err := ls.Inhume(ctx, inhumePrm)
+ if err != nil {
+ c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
+ zap.String("error", err.Error()),
)
}
}),
@@ -281,9 +306,14 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
})
}
-func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
- return &innerRingFetcherWithNotary{
- sidechain: c.cfgMorph.client,
+func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
+ if c.cfgMorph.client.ProbeNotary() {
+ return &innerRingFetcherWithNotary{
+ sidechain: c.cfgMorph.client,
+ }
+ }
+ return &innerRingFetcherWithoutNotary{
+ nm: c.cfgNetmap.wrapper,
}
}
@@ -291,7 +321,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
ls := c.cfgObject.cfgLocalStorage.localStorage
return replicator.New(
- replicator.WithLogger(c.log.WithTag(logger.TagReplicator)),
+ replicator.WithLogger(c.log),
replicator.WithPutTimeout(
replicatorconfig.PutTimeout(c.appCfg),
),
@@ -323,6 +353,7 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c,
c.cfgNetmap.state,
irFetcher,
+ objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
objectwriter.WithLogger(c.log),
objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
@@ -336,7 +367,7 @@ func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Servic
return patchsvc.NewService(sPut.Config, sGet)
}
-func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service {
+func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return searchsvc.New(
@@ -347,8 +378,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav
),
c.netMapSource,
keyStorage,
- containerSource,
- searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)),
+ searchsvc.WithLogger(c.log),
)
}
@@ -359,7 +389,6 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage)
func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator,
coreConstructor *cache.ClientCache,
containerSource containercore.Source,
- priorityMetrics []placement.Metric,
) *getsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
@@ -369,12 +398,10 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra
ls,
traverseGen.WithTraverseOptions(
placement.SuccessAfter(1),
- placement.WithPriorityMetrics(priorityMetrics),
- placement.WithNodeState(c),
),
coreConstructor,
containerSource,
- getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc)))
+ getsvc.WithLogger(c.log))
}
func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service {
@@ -385,7 +412,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag
c.netMapSource,
c,
c.cfgObject.cnrSource,
- getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)),
+ getsvcV2.WithLogger(c.log),
)
}
@@ -397,12 +424,13 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi
sSearch,
sPut,
&delNetInfo{
- State: c.cfgNetmap.state,
+ State: c.cfgNetmap.state,
+ tsLifetime: c.cfgObject.tombstoneLifetime,
cfg: c,
},
keyStorage,
- deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)),
+ deletesvc.WithLogger(c.log),
)
}
@@ -426,23 +454,63 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
)
}
-func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
+func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
+ ls := c.cfgObject.cfgLocalStorage.localStorage
+
+ return v2.New(
+ apeSvc,
+ c.netMapSource,
+ irFetcher,
+ acl.NewChecker(
+ c.cfgNetmap.state,
+ c.cfgObject.eaclSource,
+ eaclSDK.NewValidator(),
+ ls),
+ c.cfgObject.cnrSource,
+ v2.WithLogger(c.log),
+ )
+}
+
+func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
+ c.log,
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
- c.frostfsidClient,
+ c.shared.frostfsidClient,
c.netMapSource,
c.cfgNetmap.state,
c.cfgObject.cnrSource,
c.binPublicKey,
),
- objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
splitSvc,
)
}
+type morphEACLFetcher struct {
+ w *cntClient.Client
+}
+
+func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) {
+ eaclInfo, err := s.w.GetEACL(cnr)
+ if err != nil {
+ return nil, err
+ }
+
+ binTable, err := eaclInfo.Value.Marshal()
+ if err != nil {
+ return nil, fmt.Errorf("marshal eACL table: %w", err)
+ }
+
+ if !eaclInfo.Signature.Verify(binTable) {
+ // TODO(@cthulhu-rider): #468 use "const" error
+ return nil, errors.New("invalid signature of the eACL table")
+ }
+
+ return eaclInfo, nil
+}
+
type engineWithoutNotifications struct {
engine *engine.StorageEngine
}
@@ -462,13 +530,14 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...)
- return e.engine.Inhume(ctx, prm)
+ _, err := e.engine.Inhume(ctx, prm)
+ return err
}
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error {
- return engine.Put(ctx, e.engine, o, indexedContainer)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
+ return engine.Put(ctx, e.engine, o)
}
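
The pipeline comment in `initObjectService` describes a decorator chain: each layer wraps the next and can act before or after delegating. A toy sketch of that layering (the `Service` interface and layer names are invented for illustration):

```go
package main

import (
	"context"
	"fmt"
)

type Service interface {
	Handle(ctx context.Context, req string) (string, error)
}

type auditLayer struct{ next Service }

func (a auditLayer) Handle(ctx context.Context, req string) (string, error) {
	fmt.Println("audit: request observed") // act on the way in, then delegate
	return a.next.Handle(ctx, req)
}

type signLayer struct{ next Service }

func (s signLayer) Handle(ctx context.Context, req string) (string, error) {
	resp, err := s.next.Handle(ctx, req)
	if err != nil {
		return "", err
	}
	return resp + " [signed]", nil // act on the way out
}

type coreService struct{}

func (coreService) Handle(_ context.Context, req string) (string, error) {
	return "resp(" + req + ")", nil
}

func main() {
	// assembled outermost-first, mirroring the "grpc | audit | ... | split" order
	var svc Service = auditLayer{next: signLayer{next: coreService{}}}
	resp, _ := svc.Handle(context.Background(), "get")
	fmt.Println(resp) // resp(get) [signed]
}
```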
diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go
index 55f76cc76..22fda2b4c 100644
--- a/cmd/frostfs-node/policy_engine.go
+++ b/cmd/frostfs-node/policy_engine.go
@@ -21,9 +21,7 @@ type accessPolicyEngine struct {
var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil)
type morphAPEChainCacheKey struct {
- // nolint:unused
- name chain.Name
- // nolint:unused
+ name chain.Name
target engine.Target
}
diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go
index e4da8119f..dcd320146 100644
--- a/cmd/frostfs-node/pprof.go
+++ b/cmd/frostfs-node/pprof.go
@@ -1,50 +1,49 @@
package main
import (
- "context"
"runtime"
profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)
-func initProfilerService(ctx context.Context, c *cfg) {
+func initProfilerService(c *cfg) {
tuneProfilers(c)
pprof, _ := pprofComponent(c)
- pprof.init(ctx, c)
+ pprof.init(c)
}
func pprofComponent(c *cfg) (*httpComponent, bool) {
var updated bool
	// check if it has been initialized before
- if c.pprof == nil {
- c.pprof = new(httpComponent)
- c.pprof.cfg = c
- c.pprof.name = "pprof"
- c.pprof.handler = httputil.Handler()
- c.pprof.preReload = tuneProfilers
+ if c.dynamicConfiguration.pprof == nil {
+ c.dynamicConfiguration.pprof = new(httpComponent)
+ c.dynamicConfiguration.pprof.cfg = c
+ c.dynamicConfiguration.pprof.name = "pprof"
+ c.dynamicConfiguration.pprof.handler = httputil.Handler()
+ c.dynamicConfiguration.pprof.preReload = tuneProfilers
updated = true
}
	// (re)initialize from the read configuration
enabled := profilerconfig.Enabled(c.appCfg)
- if enabled != c.pprof.enabled {
- c.pprof.enabled = enabled
+ if enabled != c.dynamicConfiguration.pprof.enabled {
+ c.dynamicConfiguration.pprof.enabled = enabled
updated = true
}
address := profilerconfig.Address(c.appCfg)
- if address != c.pprof.address {
- c.pprof.address = address
+ if address != c.dynamicConfiguration.pprof.address {
+ c.dynamicConfiguration.pprof.address = address
updated = true
}
dur := profilerconfig.ShutdownTimeout(c.appCfg)
- if dur != c.pprof.shutdownDur {
- c.pprof.shutdownDur = dur
+ if dur != c.dynamicConfiguration.pprof.shutdownDur {
+ c.dynamicConfiguration.pprof.shutdownDur = dur
updated = true
}
- return c.pprof, updated
+ return c.dynamicConfiguration.pprof, updated
}
func tuneProfilers(c *cfg) {
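
Both `metricsComponent` and `pprofComponent` follow the same reconcile shape: lazily create the component on first use, diff each configuration field against the running value, and report whether anything changed so the caller knows a restart is needed. A minimal standalone sketch (the config type is a stand-in for `appCfg`):

```go
package main

import "fmt"

type component struct {
	name    string
	enabled bool
	address string
}

type appConfig struct {
	enabled bool
	address string
}

func reconcile(current *component, cfg appConfig) (*component, bool) {
	updated := false
	if current == nil { // first call: create and mark as changed
		current = &component{name: "pprof"}
		updated = true
	}
	if cfg.enabled != current.enabled {
		current.enabled = cfg.enabled
		updated = true
	}
	if cfg.address != current.address {
		current.address = cfg.address
		updated = true
	}
	return current, updated // caller restarts the server only when updated
}

func main() {
	c, changed := reconcile(nil, appConfig{enabled: true, address: ":6060"})
	fmt.Println(c.name, changed) // pprof true
	_, changed = reconcile(c, appConfig{enabled: true, address: ":6060"})
	fmt.Println(changed) // false: nothing to restart
}
```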
diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go
deleted file mode 100644
index 6394b668b..000000000
--- a/cmd/frostfs-node/qos.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package main
-
-import (
- "bytes"
- "context"
-
- qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "go.uber.org/zap"
-)
-
-type cfgQoSService struct {
- netmapSource netmap.Source
- logger *logger.Logger
- allowedCriticalPubs [][]byte
- allowedInternalPubs [][]byte
-}
-
-func initQoSService(c *cfg) {
- criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
- internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
- rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
- rawInternalPubs := make([][]byte, 0, len(internalPubs))
- for i := range criticalPubs {
- rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
- }
- for i := range internalPubs {
- rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
- }
-
- c.cfgQoSService = cfgQoSService{
- netmapSource: c.netMapSource,
- logger: c.log,
- allowedCriticalPubs: rawCriticalPubs,
- allowedInternalPubs: rawInternalPubs,
- }
-}
-
-func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
- rawTag, defined := qosTagging.IOTagFromContext(ctx)
- if !defined {
- if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
- }
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
- ioTag, err := qos.FromRawString(rawTag)
- if err != nil {
- s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
-
- switch ioTag {
- case qos.IOTagClient:
- return ctx
- case qos.IOTagCritical:
- for _, pk := range s.allowedCriticalPubs {
- if bytes.Equal(pk, requestSignPublicKey) {
- return ctx
- }
- }
- nm, err := s.netmapSource.GetNetMap(ctx, 0)
- if err != nil {
- s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
- for _, node := range nm.Nodes() {
- if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
- return ctx
- }
- }
- s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- case qos.IOTagInternal:
- if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
- return ctx
- }
- s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- default:
- s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
-}
-
-func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
- for _, pk := range s.allowedInternalPubs {
- if bytes.Equal(pk, publicKey) {
- return true
- }
- }
- nm, err := s.netmapSource.GetNetMap(ctx, 0)
- if err != nil {
- s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
- return false
- }
- for _, node := range nm.Nodes() {
- if bytes.Equal(node.PublicKey(), publicKey) {
- return true
- }
- }
-
- return false
-}
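
The deleted `AdjustIncomingTag` rewrites a request context before handling. One plausible call site — shown purely as a hypothetical sketch, since this patch does not include the real wiring — is a gRPC unary server interceptor; `keyFromRequest` is a placeholder for extracting the request signer's public key:

```go
package qosmiddleware

import (
	"context"

	"google.golang.org/grpc"
)

type adjuster interface {
	AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
}

// keyFromRequest is a placeholder: the node derives the signer's public key
// from the request's signature section.
func keyFromRequest(req any) []byte { return nil }

func qosUnaryInterceptor(a adjuster) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		ctx = a.AdjustIncomingTag(ctx, keyFromRequest(req))
		return handler(ctx, req)
	}
}
```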
diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go
deleted file mode 100644
index 971f9eebf..000000000
--- a/cmd/frostfs-node/qos_test.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package main
-
-import (
- "context"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-func TestQoSService_Client(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- t.Run("IO tag client defined", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
-}
-
-func TestQoSService_Internal(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
- t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
- t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
- t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
-}
-
-func TestQoSService_Critical(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagCritical.String(), tag)
- })
- t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagCritical.String(), tag)
- })
-}
-
-func TestQoSService_NetmapGetError(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- s.netmapSource = &utilTesting.TestNetmapSource{}
- t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
-}
-
-func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
- nmSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- reqSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- allowedCritSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- allowedIntSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- var node netmap.NodeInfo
- node.SetPublicKey(nmSigner.PublicKey().Bytes())
- nm := &netmap.NetMap{}
- nm.SetEpoch(100)
- nm.SetNodes([]netmap.NodeInfo{node})
-
- return &cfgQoSService{
- logger: test.NewLogger(t),
- netmapSource: &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
- 100: nm,
- },
- CurrentEpoch: 100,
- },
- allowedCriticalPubs: [][]byte{
- allowedCritSigner.PublicKey().Bytes(),
- },
- allowedInternalPubs: [][]byte{
- allowedIntSigner.PublicKey().Bytes(),
- },
- },
- &testQoSServicePublicKeys{
- NetmapNode: nmSigner.PublicKey().Bytes(),
- Request: reqSigner.PublicKey().Bytes(),
- Internal: allowedIntSigner.PublicKey().Bytes(),
- Critical: allowedCritSigner.PublicKey().Bytes(),
- }
-}
-
-type testQoSServicePublicKeys struct {
- NetmapNode []byte
- Request []byte
- Internal []byte
- Critical []byte
-}
diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go
index f6d398574..d858ba490 100644
--- a/cmd/frostfs-node/runtime.go
+++ b/cmd/frostfs-node/runtime.go
@@ -1,7 +1,6 @@
package main
import (
- "context"
"os"
"runtime/debug"
@@ -10,17 +9,17 @@ import (
"go.uber.org/zap"
)
-func setRuntimeParameters(ctx context.Context, c *cfg) {
+func setRuntimeParameters(c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
- c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
+ c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
- c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated,
+ c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}
diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go
index fbfe3f5e6..ee21ec230 100644
--- a/cmd/frostfs-node/session.go
+++ b/cmd/frostfs-node/session.go
@@ -6,6 +6,8 @@ import (
"net"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -14,9 +16,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"google.golang.org/grpc"
)
@@ -49,21 +48,18 @@ func initSessionService(c *cfg) {
_ = c.privateTokenStore.Close()
})
- addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
+ addNewEpochNotificationHandler(c, func(ev event.Event) {
c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber())
})
server := sessionTransportGRPC.New(
sessionSvc.NewSignService(
&c.key.PrivateKey,
- sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)),
+ sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log),
),
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
sessionGRPC.RegisterSessionServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(sessionGRPC.SessionService_ServiceDesc), server)
})
}
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index 65f5aec15..f550dd882 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -13,12 +13,12 @@ import (
func initTracing(ctx context.Context, c *cfg) {
conf, err := tracingconfig.ToTracingConfig(c.appCfg)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
_, err = tracing.Setup(ctx, *conf)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
@@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
defer cancel()
err := tracing.Shutdown(ctx) // cfg context cancels before close
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index 62af45389..d22e510de 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -14,7 +14,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
"google.golang.org/grpc"
@@ -30,50 +29,49 @@ type cnrSource struct {
cli *containerClient.Client
}
-func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
- return c.src.Get(ctx, id)
+func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
+ return c.src.Get(id)
}
-func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) {
- return c.src.DeletionInfo(ctx, cid)
+func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
+ return c.src.DeletionInfo(cid)
}
-func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) {
- return c.cli.ContainersOf(ctx, nil)
+func (c cnrSource) List() ([]cid.ID, error) {
+ return c.cli.ContainersOf(nil)
}
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
- c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
+ c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}
c.treeService = tree.New(
tree.WithContainerSource(cnrSource{
src: c.cfgObject.cnrSource,
- cli: c.cnrClient,
+ cli: c.shared.cnrClient,
}),
- tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
+ tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
+ tree.WithEACLSource(c.cfgObject.eaclSource),
tree.WithNetmapSource(c.netMapSource),
tree.WithPrivateKey(&c.key.PrivateKey),
- tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)),
+ tree.WithLogger(c.log),
tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage),
tree.WithContainerCacheSize(treeConfig.CacheSize()),
tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()),
tree.WithReplicationChannelCapacity(treeConfig.ReplicationChannelCapacity()),
tree.WithReplicationWorkerCount(treeConfig.ReplicationWorkerCount()),
- tree.WithSyncBatchSize(treeConfig.SyncBatchSize()),
tree.WithAuthorizedKeys(treeConfig.AuthorizedKeys()),
tree.WithMetrics(c.metricsCollector.TreeService()),
tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()),
tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()),
tree.WithNetmapState(c.cfgNetmap.state),
- tree.WithDialerSource(c.dialerSource),
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService))
+ tree.RegisterTreeServiceServer(s, c.treeService)
})
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
@@ -81,10 +79,10 @@ func initTreeService(c *cfg) {
}))
if d := treeConfig.SyncInterval(); d == 0 {
- addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) {
+ addNewEpochNotificationHandler(c, func(_ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {
@@ -95,7 +93,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}
@@ -104,17 +102,17 @@ func initTreeService(c *cfg) {
}()
}
- subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
+ subscribeToContainerRemoval(c, func(e event.Event) {
ev := e.(containerEvent.DeleteSuccess)
// This is executed asynchronously, so we don't care about the operation taking some time.
- c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
- err := c.treeService.DropTree(ctx, ev.ID, "")
+ c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
+ err := c.treeService.DropTree(context.Background(), ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
- c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
+ c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
})
diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go
index 22d2e0aa9..ae52b9e4a 100644
--- a/cmd/frostfs-node/validate.go
+++ b/cmd/frostfs-node/validate.go
@@ -30,11 +30,6 @@ func validateConfig(c *config.Config) error {
return fmt.Errorf("invalid logger destination: %w", err)
}
- err = loggerPrm.SetTags(loggerconfig.Tags(c))
- if err != nil {
- return fmt.Errorf("invalid list of allowed tags: %w", err)
- }
-
// shard configuration validation
shardNum := 0
diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go
index 495365cf0..d9c0f167f 100644
--- a/cmd/frostfs-node/validate_test.go
+++ b/cmd/frostfs-node/validate_test.go
@@ -1,6 +1,7 @@
package main
import (
+ "os"
"path/filepath"
"testing"
@@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
require.NoError(t, err)
})
})
+
+ t.Run("mainnet", func(t *testing.T) {
+ os.Clearenv() // ENVs have priority over config files, so we do this in tests
+ p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
+ c := config.New(p, "", config.EnvPrefix)
+ require.NoError(t, validateConfig(c))
+ })
+ t.Run("testnet", func(t *testing.T) {
+ os.Clearenv() // ENVs have priority over config files, so we do this in tests
+ p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
+ c := config.New(p, "", config.EnvPrefix)
+ require.NoError(t, validateConfig(c))
+ })
}
diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go
deleted file mode 100644
index e5a35ab71..000000000
--- a/cmd/internal/common/ape/commands.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package ape
-
-import (
- "encoding/hex"
- "errors"
- "fmt"
- "strconv"
- "strings"
-
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "github.com/nspcc-dev/neo-go/cli/input"
- "github.com/spf13/cobra"
-)
-
-const (
- defaultNamespace = "root"
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
-
- Ingress = "ingress"
- S3 = "s3"
-)
-
-var mChainName = map[string]apechain.Name{
- Ingress: apechain.Ingress,
- S3: apechain.S3,
-}
-
-var (
- errSettingDefaultValueWasDeclined = errors.New("setting default value was declined")
- errUnknownTargetType = errors.New("unknown target type")
- errUnsupportedChainName = errors.New("unsupported chain name")
-)
-
-// PrintHumanReadableAPEChain print APE chain rules.
-func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) {
- cmd.Println("Chain ID: " + string(chain.ID))
- cmd.Printf(" HEX: %x\n", chain.ID)
- cmd.Println("Rules:")
- for _, rule := range chain.Rules {
- cmd.Println("\n\tStatus: " + rule.Status.String())
- cmd.Println("\tAny: " + strconv.FormatBool(rule.Any))
- cmd.Println("\tConditions:")
- for _, c := range rule.Condition {
- var ot string
- switch c.Kind {
- case apechain.KindResource:
- ot = "Resource"
- case apechain.KindRequest:
- ot = "Request"
- default:
- panic("unknown object type")
- }
- cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value))
- }
- cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted))
- for _, name := range rule.Actions.Names {
- cmd.Println("\t\t" + name)
- }
- cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted))
- for _, name := range rule.Resources.Names {
- cmd.Println("\t\t" + name)
- }
- }
-}
-
-// ParseTarget handles target parsing of an APE chain.
-func ParseTarget(cmd *cobra.Command) engine.Target {
- typ := ParseTargetType(cmd)
- name, _ := cmd.Flags().GetString(TargetNameFlag)
- switch typ {
- case engine.Namespace:
- if name == "" {
- ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace))
- commonCmd.ExitOnErr(cmd, "read line error: %w", err)
- ln = strings.ToLower(ln)
- if len(ln) > 0 && (ln[0] == 'n') {
- commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined)
- }
- name = defaultNamespace
- }
- return engine.NamespaceTarget(name)
- case engine.Container:
- var cnr cid.ID
- commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
- return engine.ContainerTarget(name)
- case engine.User:
- return engine.UserTarget(name)
- case engine.Group:
- return engine.GroupTarget(name)
- default:
- commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
- }
- panic("unreachable")
-}
-
-// ParseTargetType handles target type parsing of an APE chain.
-func ParseTargetType(cmd *cobra.Command) engine.TargetType {
- typ, _ := cmd.Flags().GetString(TargetTypeFlag)
- switch typ {
- case namespaceTarget:
- return engine.Namespace
- case containerTarget:
- return engine.Container
- case userTarget:
- return engine.User
- case groupTarget:
- return engine.Group
- default:
- commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType)
- }
- panic("unreachable")
-}
-
-// ParseChainID handles the parsing of APE-chain identifier.
-// For some subcommands, chain ID is optional as an input parameter and should be generated by
-// the service instead.
-func ParseChainID(cmd *cobra.Command) (id apechain.ID) {
- chainID, _ := cmd.Flags().GetString(ChainIDFlag)
- id = apechain.ID(chainID)
-
- hexEncoded, _ := cmd.Flags().GetBool(ChainIDHexFlag)
- if !hexEncoded {
- return
- }
-
- chainIDRaw, err := hex.DecodeString(chainID)
- commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
- id = apechain.ID(chainIDRaw)
- return
-}
-
-// ParseChain parses an APE chain which can be provided either as a rule statement
-// or loaded from a binary/JSON file path.
-func ParseChain(cmd *cobra.Command) *apechain.Chain {
- chain := new(apechain.Chain)
- chain.ID = ParseChainID(cmd)
-
- if rules, _ := cmd.Flags().GetStringArray(RuleFlag); len(rules) > 0 {
- commonCmd.ExitOnErr(cmd, "parser error: %w", apeutil.ParseAPEChain(chain, rules))
- } else if encPath, _ := cmd.Flags().GetString(PathFlag); encPath != "" {
- commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", apeutil.ParseAPEChainBinaryOrJSON(chain, encPath))
- } else {
- commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed"))
- }
-
- cmd.Println("Parsed chain:")
- PrintHumanReadableAPEChain(cmd, chain)
-
- return chain
-}
-
-// ParseChainName parses chain name: the place in the request lifecycle where policy is applied.
-func ParseChainName(cmd *cobra.Command) apechain.Name {
- chainName, _ := cmd.Flags().GetString(ChainNameFlag)
- apeChainName, ok := mChainName[strings.ToLower(chainName)]
- if !ok {
- commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
- }
- return apeChainName
-}
diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go
deleted file mode 100644
index d8b2e88a2..000000000
--- a/cmd/internal/common/ape/flags.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package ape
-
-const (
- RuleFlag = "rule"
- PathFlag = "path"
- PathFlagDesc = "Path to encoded chain in JSON or binary format"
- TargetNameFlag = "target-name"
- TargetNameFlagDesc = "Resource name in APE resource name format"
- TargetTypeFlag = "target-type"
- TargetTypeFlagDesc = "Resource type(container/namespace)"
- ChainIDFlag = "chain-id"
- ChainIDFlagDesc = "Chain id"
- ChainIDHexFlag = "chain-id-hex"
- ChainIDHexFlagDesc = "Flag to parse chain ID as hex"
- ChainNameFlag = "chain-name"
- ChainNameFlagDesc = "Chain name(ingress|s3)"
- AllFlag = "all"
-)
-
-const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format:
- <status>[:status_detail] <action>... <condition>... <resource>...
-
-Status:
- - allow Permits specified actions
- - deny Prohibits specified actions
- - deny:QuotaLimitReached Denies access due to quota limits
-
-Actions:
- Object operations:
- - Object.Put, Object.Get, etc.
- - Object.* (all object operations)
- Container operations:
- - Container.Put, Container.Get, etc.
- - Container.* (all container operations)
-
-Conditions:
- ResourceCondition:
- Format: ResourceCondition:"key"=value, "key"!=value
- Reserved properties (use '\' before '$'):
- - $Object:version
- - $Object:objectID
- - $Object:containerID
- - $Object:ownerID
- - $Object:creationEpoch
- - $Object:payloadLength
- - $Object:payloadHash
- - $Object:objectType
- - $Object:homomorphicHash
-
-RequestCondition:
- Format: RequestCondition:"key"=value, "key"!=value
- Reserved properties (use '\' before '$'):
- - $Actor:publicKey
- - $Actor:role
-
- Example:
- ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others
-
-Resources:
- For objects:
- - namespace/cid/oid (specific object)
- - namespace/cid/* (all objects in container)
- - namespace/* (all objects in namespace)
- - * (all objects)
- - /* (all objects in root namespace)
- - /cid/* (all objects in root container)
- - /cid/oid (specific object in root container)
-
- For containers:
- - namespace/cid (specific container)
- - namespace/* (all containers in namespace)
- - * (all containers)
- - /cid (root container)
- - /* (all root containers)
-
-Notes:
- - Cannot mix object and container operations in one rule
- - Default behavior is Any=false unless 'any' is specified
- - Use 'all' keyword to explicitly set Any=false`
diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go
index 13f447af4..9e4fa3098 100644
--- a/cmd/internal/common/exit.go
+++ b/cmd/internal/common/exit.go
@@ -26,15 +26,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
_ = iota
internal
aclDenied
- apemanagerDenied
)
var (
code int
- internalErr = new(sdkstatus.ServerInternal)
- accessErr = new(sdkstatus.ObjectAccessDenied)
- apemanagerErr = new(sdkstatus.APEManagerAccessDenied)
+ internalErr = new(sdkstatus.ServerInternal)
+ accessErr = new(sdkstatus.ObjectAccessDenied)
)
switch {
@@ -43,21 +41,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
case errors.As(err, &accessErr):
code = aclDenied
err = fmt.Errorf("%w: %s", err, accessErr.Reason())
- case errors.As(err, &apemanagerErr):
- code = apemanagerDenied
- err = fmt.Errorf("%w: %s", err, apemanagerErr.Reason())
default:
code = internal
}
cmd.PrintErrln(err)
- for p := cmd; p != nil; p = p.Parent() {
- if p.PersistentPostRun != nil {
- p.PersistentPostRun(cmd, nil)
- if !cobra.EnableTraverseRunHooks {
- break
- }
- }
+ if cmd.PersistentPostRun != nil {
+ cmd.PersistentPostRun(cmd, nil)
}
os.Exit(code)
}
diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go
index 5dd1a060e..79b03a726 100644
--- a/cmd/internal/common/netmap.go
+++ b/cmd/internal/common/netmap.go
@@ -14,28 +14,28 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo,
) {
var strState string
- switch node.Status() {
+ switch {
default:
strState = "STATE_UNSUPPORTED"
- case netmap.Online:
+ case node.IsOnline():
strState = "ONLINE"
- case netmap.Offline:
+ case node.IsOffline():
strState = "OFFLINE"
- case netmap.Maintenance:
+ case node.IsMaintenance():
strState = "MAINTENANCE"
}
cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState)
- for endpoint := range node.NetworkEndpoints() {
+ netmap.IterateNetworkEndpoints(node, func(endpoint string) {
cmd.Printf("%s ", endpoint)
- }
+ })
cmd.Println()
if !short {
- for key, value := range node.Attributes() {
+ node.IterateAttributes(func(key, value string) {
cmd.Printf("%s\t%s: %s\n", indent, key, value)
- }
+ })
}
}
diff --git a/config/example/ir.env b/config/example/ir.env
index c13044a6e..3f9530ab6 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -1,7 +1,4 @@
FROSTFS_IR_LOGGER_LEVEL=info
-FROSTFS_IR_LOGGER_TIMESTAMP=true
-FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph"
-FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug"
FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX
@@ -82,12 +79,3 @@ FROSTFS_IR_PPROF_MUTEX_RATE=10000
FROSTFS_IR_PROMETHEUS_ENABLED=true
FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090
FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s
-
-FROSTFS_MULTINET_ENABLED=true
-FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
-FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
-FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
-FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
-FROSTFS_MULTINET_BALANCER=roundrobin
-FROSTFS_MULTINET_RESTRICT=false
-FROSTFS_MULTINET_FALLBACK_DELAY=350ms
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index ed53f014b..401328e72 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -2,10 +2,6 @@
logger:
level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
- timestamp: true
- tags:
- - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`.
- level: debug
wallet:
path: /path/to/wallet.json # Path to NEP-6 NEO wallet file
@@ -126,18 +122,3 @@ prometheus:
systemdnotify:
enabled: true
-
-multinet:
- enabled: true
- subnets:
- - mask: 192.168.219.174/24
- source_ips:
- - 192.168.218.185
- - 192.168.219.185
- - mask: 10.78.70.74/24
- source_ips:
- - 10.78.70.185
- - 10.78.71.185
- balancer: roundrobin
- restrict: false
- fallback_delay: 350ms
diff --git a/config/example/node.env b/config/example/node.env
index 9a2426358..030a79934 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -1,8 +1,5 @@
FROSTFS_LOGGER_LEVEL=debug
FROSTFS_LOGGER_DESTINATION=journald
-FROSTFS_LOGGER_TIMESTAMP=true
-FROSTFS_LOGGER_TAGS_0_NAMES="main, morph"
-FROSTFS_LOGGER_TAGS_0_LEVEL="debug"
FROSTFS_PPROF_ENABLED=true
FROSTFS_PPROF_ADDRESS=localhost:6060
@@ -22,9 +19,9 @@ FROSTFS_NODE_WALLET_PASSWORD=password
FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083"
FROSTFS_NODE_ATTRIBUTE_0=Price:11
FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK"
+FROSTFS_NODE_RELAY=true
FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions
FROSTFS_NODE_PERSISTENT_STATE_PATH=/state
-FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db
# Tree service section
FROSTFS_TREE_ENABLED=true
@@ -33,7 +30,6 @@ FROSTFS_TREE_REPLICATION_CHANNEL_CAPACITY=32
FROSTFS_TREE_REPLICATION_WORKER_COUNT=32
FROSTFS_TREE_REPLICATION_TIMEOUT=5s
FROSTFS_TREE_SYNC_INTERVAL=1h
-FROSTFS_TREE_SYNC_BATCH_SIZE=2000
FROSTFS_TREE_AUTHORIZED_KEYS="0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
# gRPC section
@@ -85,20 +81,14 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10
-# Container service section
-FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
-
# Object service section
+FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
+FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
-FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"
-
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_0_MAX_OPS=1000
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
# Storage engine section
+FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard
### Flag to refill Metabase from BlobStor
@@ -123,8 +113,7 @@ FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644
FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100
FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms
### Blobstor config
-FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true
-FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest
+FROSTFS_STORAGE_SHARD_0_COMPRESS=true
FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*"
FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true
FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7
@@ -159,54 +148,6 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
#### Limit of concurrent workers collecting expired objects by the garbage collector
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
-#### Limits config
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100
## 1 shard
### Flag to refill Metabase from BlobStor
@@ -259,25 +200,8 @@ FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
FROSTFS_TRACING_EXPORTER="otlp_grpc"
FROSTFS_TRACING_TRUSTED_CA=""
-FROSTFS_TRACING_ATTRIBUTES_0_KEY=key0
-FROSTFS_TRACING_ATTRIBUTES_0_VALUE=value
-FROSTFS_TRACING_ATTRIBUTES_1_KEY=key1
-FROSTFS_TRACING_ATTRIBUTES_1_VALUE=value
FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
# AUDIT section
FROSTFS_AUDIT_ENABLED=true
-
-# MULTINET section
-FROSTFS_MULTINET_ENABLED=true
-FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
-FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
-FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
-FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
-FROSTFS_MULTINET_BALANCER=roundrobin
-FROSTFS_MULTINET_RESTRICT=false
-FROSTFS_MULTINET_FALLBACK_DELAY=350ms
-
-FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
-FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
diff --git a/config/example/node.json b/config/example/node.json
index 6b7a9c2c6..4e6d239fe 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -1,14 +1,7 @@
{
"logger": {
"level": "debug",
- "destination": "journald",
- "timestamp": true,
- "tags": [
- {
- "names": "main, morph",
- "level": "debug"
- }
- ]
+ "destination": "journald"
},
"pprof": {
"enabled": true,
@@ -37,13 +30,13 @@
],
"attribute_0": "Price:11",
"attribute_1": "UN-LOCODE:RU MSK",
+ "relay": true,
"persistent_sessions": {
"path": "/sessions"
},
"persistent_state": {
"path": "/state"
- },
- "locode_db_path": "/path/to/locode/db"
+ }
},
"grpc": {
"0": {
@@ -75,7 +68,6 @@
"replication_worker_count": 32,
"replication_timeout": "5s",
"sync_interval": "1h",
- "sync_batch_size": 2000,
"authorized_keys": [
"0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0",
"02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
@@ -130,40 +122,18 @@
"pool_size": 10,
"put_timeout": "15s"
},
- "container": {
- "list_stream": {
- "batch_size": "500"
- }
- },
"object": {
"delete": {
"tombstone_lifetime": 10
},
"put": {
+ "remote_pool_size": 100,
+ "local_pool_size": 200,
"skip_session_token_issuer_verification": true
- },
- "get": {
- "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
}
},
- "rpc": {
- "limits": [
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/PutSingle",
- "/neo.fs.v2.object.ObjectService/Put"
- ],
- "max_ops": 1000
- },
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/Get"
- ],
- "max_ops": 10000
- }
- ]
- },
"storage": {
+ "shard_pool_size": 15,
"shard_ro_error_threshold": 100,
"shard": {
"0": {
@@ -188,15 +158,12 @@
"max_batch_size": 100,
"max_batch_delay": "10ms"
},
- "compression": {
- "enabled": true,
- "level": "fastest",
- "exclude_content_types": [
- "audio/*", "video/*"
- ],
- "estimate_compressibility": true,
- "estimate_compressibility_threshold": 0.7
- },
+ "compress": true,
+ "compression_exclude_content_types": [
+ "audio/*", "video/*"
+ ],
+ "compression_estimate_compressibility": true,
+ "compression_estimate_compressibility_threshold": 0.7,
"small_object_size": 102400,
"blobstor": [
{
@@ -229,87 +196,6 @@
"remover_sleep_interval": "2m",
"expired_collector_batch_size": 1500,
"expired_collector_worker_count": 15
- },
- "limits": {
- "read": {
- "max_running_ops": 10000,
- "max_waiting_ops": 1000,
- "idle_timeout": "30s",
- "tags": [
- {
- "tag": "internal",
- "weight": 20,
- "limit_ops": 0,
- "reserved_ops": 1000
- },
- {
- "tag": "client",
- "weight": 70,
- "reserved_ops": 10000
- },
- {
- "tag": "background",
- "weight": 5,
- "limit_ops": 10000,
- "reserved_ops": 0
- },
- {
- "tag": "writecache",
- "weight": 5,
- "limit_ops": 25000
- },
- {
- "tag": "policer",
- "weight": 5,
- "limit_ops": 25000,
- "prohibited": true
- },
- {
- "tag": "treesync",
- "weight": 5,
- "limit_ops": 25
- }
- ]
- },
- "write": {
- "max_running_ops": 1000,
- "max_waiting_ops": 100,
- "idle_timeout": "45s",
- "tags": [
- {
- "tag": "internal",
- "weight": 200,
- "limit_ops": 0,
- "reserved_ops": 100
- },
- {
- "tag": "client",
- "weight": 700,
- "reserved_ops": 1000
- },
- {
- "tag": "background",
- "weight": 50,
- "limit_ops": 1000,
- "reserved_ops": 0
- },
- {
- "tag": "writecache",
- "weight": 50,
- "limit_ops": 2500
- },
- {
- "tag": "policer",
- "weight": 50,
- "limit_ops": 2500
- },
- {
- "tag": "treesync",
- "weight": 50,
- "limit_ops": 100
- }
- ]
- }
}
},
"1": {
@@ -330,9 +216,7 @@
"max_batch_size": 200,
"max_batch_delay": "20ms"
},
- "compression": {
- "enabled": false
- },
+ "compress": false,
"small_object_size": 102400,
"blobstor": [
{
@@ -370,60 +254,14 @@
},
"tracing": {
"enabled": true,
- "endpoint": "localhost",
+ "endpoint": "localhost:9090",
"exporter": "otlp_grpc",
- "trusted_ca": "",
- "attributes":[
- {
- "key": "key0",
- "value": "value"
- },
- {
- "key": "key1",
- "value": "value"
- }
- ]
+ "trusted_ca": "/etc/ssl/tracing.pem"
},
"runtime": {
"soft_memory_limit": 1073741824
},
"audit": {
"enabled": true
- },
- "multinet": {
- "enabled": true,
- "subnets": [
- {
- "mask": "192.168.219.174/24",
- "source_ips": [
- "192.168.218.185",
- "192.168.219.185"
- ]
- },
- {
- "mask": "10.78.70.74/24",
- "source_ips":[
- "10.78.70.185",
- "10.78.71.185"
- ]
- }
- ],
- "balancer": "roundrobin",
- "restrict": false,
- "fallback_delay": "350ms"
- },
- "qos": {
- "critical": {
- "authorized_keys": [
- "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
- "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
- ]
- },
- "internal": {
- "authorized_keys": [
- "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2",
- "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
- ]
- }
}
}
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 2d4bc90fb..5a8e6a2a4 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -1,10 +1,6 @@
logger:
level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
destination: journald # logger destination: one of "stdout" (default), "journald"
- timestamp: true
- tags:
- - names: "main, morph"
- level: debug
systemdnotify:
enabled: true
@@ -34,11 +30,11 @@ node:
- grpcs://localhost:8083
attribute_0: "Price:11"
attribute_1: UN-LOCODE:RU MSK
+ relay: true # start Storage node in relay mode without bootstrapping into the Network map
persistent_sessions:
path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions)
persistent_state:
path: /state # path to persistent state file of Storage node
- "locode_db_path": "/path/to/locode/db"
grpc:
- endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server
@@ -62,7 +58,6 @@ tree:
replication_channel_capacity: 32
replication_timeout: 5s
sync_interval: 1h
- sync_batch_size: 2000
authorized_keys: # list of hex-encoded public keys that have rights to use the Tree Service with frostfs-cli
- 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0
- 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56
@@ -82,11 +77,9 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
morph:
dial_timeout: 30s # timeout for side chain NEO RPC client connection
- cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls).
- # Negative value disables caching. A zero value sets the default value.
+ cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Default value: block time. It is recommended to have this value less or equal to block time.
# Cached entities: containers, container lists, eACL tables.
- container_cache_size: 100 # container_cache_size is is the maximum number of containers in the cache.
switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node
rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success
- address: wss://rpc1.morph.frostfs.info:40341/ws
@@ -98,9 +91,6 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
ape_chain_cache_size: 100000
- netmap:
- candidates:
- poll_interval: 20s
apiclient:
dial_timeout: 15s # timeout for FrostFS API client connection
@@ -115,31 +105,17 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum amount of concurrent replications
-container:
- list_stream:
- batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once
-
object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
put:
+ remote_pool_size: 100 # number of async workers for remote PUT operations
+ local_pool_size: 200 # number of async workers for local PUT operations
skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
- get:
- priority: # list of metrics of nodes for prioritization
- - $attribute:ClusterName
- - $attribute:UN-LOCODE
-
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- max_ops: 1000
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
storage:
+ # note: shard configuration can be omitted for relay node (see `node.relay`)
+ shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
shard:
@@ -153,7 +129,7 @@ storage:
flush_worker_count: 30 # number of write-cache flusher threads
metabase:
- perm: 0o644 # permissions for metabase files(directories: +x for current user and group)
+ perm: 0644 # permissions for metabase files(directories: +x for current user and group)
max_batch_size: 200
max_batch_delay: 20ms
@@ -161,19 +137,18 @@ storage:
max_batch_delay: 5ms # maximum delay for a batch of operations to be executed
max_batch_size: 100 # maximum amount of operations in a single batch
- compression:
- enabled: false # turn on/off zstd compression of stored objects
+ compress: false # turn on/off zstd(level 3) compression of stored objects
small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes
blobstor:
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
- perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
+ perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
- - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
+ - perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS
gc:
@@ -204,14 +179,12 @@ storage:
max_batch_size: 100
max_batch_delay: 10ms
- compression:
- enabled: true # turn on/off zstd compression of stored objects
- level: fastest
- exclude_content_types:
- - audio/*
- - video/*
- estimate_compressibility: true
- estimate_compressibility_threshold: 0.7
+ compress: true # turn on/off zstd(level 3) compression of stored objects
+ compression_exclude_content_types:
+ - audio/*
+ - video/*
+ compression_estimate_compressibility: true
+ compression_estimate_compressibility_threshold: 0.7
blobstor:
- type: blobovnicza
@@ -234,59 +207,6 @@ storage:
expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
- limits:
- read:
- max_running_ops: 10000
- max_waiting_ops: 1000
- idle_timeout: 30s
- tags:
- - tag: internal
- weight: 20
- limit_ops: 0
- reserved_ops: 1000
- - tag: client
- weight: 70
- reserved_ops: 10000
- - tag: background
- weight: 5
- limit_ops: 10000
- reserved_ops: 0
- - tag: writecache
- weight: 5
- limit_ops: 25000
- - tag: policer
- weight: 5
- limit_ops: 25000
- prohibited: true
- - tag: treesync
- weight: 5
- limit_ops: 25
- write:
- max_running_ops: 1000
- max_waiting_ops: 100
- idle_timeout: 45s
- tags:
- - tag: internal
- weight: 200
- limit_ops: 0
- reserved_ops: 100
- - tag: client
- weight: 700
- reserved_ops: 1000
- - tag: background
- weight: 50
- limit_ops: 1000
- reserved_ops: 0
- - tag: writecache
- weight: 50
- limit_ops: 2500
- - tag: policer
- weight: 50
- limit_ops: 2500
- - tag: treesync
- weight: 50
- limit_ops: 100
-
1:
writecache:
path: tmp/1/cache # write-cache root directory
@@ -305,46 +225,16 @@ storage:
pilorama:
path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
- perm: 0o644 # permission to use for the database file and intermediate directories
+ perm: 0644 # permission to use for the database file and intermediate directories
tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost"
trusted_ca: ""
- attributes:
- - key: key0
- value: value
- - key: key1
- value: value
runtime:
soft_memory_limit: 1gb
audit:
enabled: true
-
-multinet:
- enabled: true
- subnets:
- - mask: 192.168.219.174/24
- source_ips:
- - 192.168.218.185
- - 192.168.219.185
- - mask: 10.78.70.74/24
- source_ips:
- - 10.78.70.185
- - 10.78.71.185
- balancer: roundrobin
- restrict: false
- fallback_delay: 350ms
-
-qos:
- critical:
- authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag
- - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
- internal:
- authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag
- - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
- - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a
diff --git a/config/mainnet/README.md b/config/mainnet/README.md
new file mode 100644
index 000000000..717a9b0ff
--- /dev/null
+++ b/config/mainnet/README.md
@@ -0,0 +1,28 @@
+# N3 Mainnet Storage node configuration
+
+Here is a template for a simple storage node configuration in N3 Mainnet.
+Make sure to specify correct values instead of the `<...>` placeholders.
+Do not change the `contracts` section. Run the latest frostfs-node release with
+the completed config: `frostfs-node -c config.yml`.
+
+To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract.
+The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
+(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).
+
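+For example, a deposit can be made by transferring GAS to that address with
+`neo-go` (a sketch mirroring the Testnet guide; the wallet path, sender
+address and RPC endpoint are placeholders to replace with your own values):
+
+```
+neo-go wallet nep17 transfer -w <path/to/wallet.json> -r <n3-mainnet-rpc-endpoint> \
+--from <your-address> \
+--to NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk \
+--token GAS \
+--amount 1
+```
+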
+## Tips
+
+Use the `grpcs://` scheme in the announced address if you enable TLS in the gRPC server.
+```yaml
+node:
+ addresses:
+ - grpcs://frostfs.my.org:8080
+
+grpc:
+ num: 1
+ 0:
+ endpoint: frostfs.my.org:8080
+ tls:
+ enabled: true
+ certificate: /path/to/cert
+ key: /path/to/key
+```
diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml
new file mode 100644
index 000000000..d86ea451f
--- /dev/null
+++ b/config/mainnet/config.yml
@@ -0,0 +1,70 @@
+node:
+ wallet:
+ path:
+ address:
+ password:
+ addresses:
+ -
+ attribute_0: UN-LOCODE:
+ attribute_1: Price:100000
+ attribute_2: User-Agent:FrostFS\/0.9999
+
+grpc:
+ num: 1
+ 0:
+ endpoint:
+ tls:
+ enabled: false
+
+storage:
+ shard_num: 1
+ shard:
+ 0:
+ metabase:
+ path: /storage/path/metabase
+ perm: 0600
+ blobstor:
+ - path: /storage/path/blobovnicza
+ type: blobovnicza
+ perm: 0600
+ opened_cache_capacity: 32
+ depth: 1
+ width: 1
+ - path: /storage/path/fstree
+ type: fstree
+ perm: 0600
+ depth: 4
+ writecache:
+ enabled: false
+ gc:
+ remover_batch_size: 100
+ remover_sleep_interval: 1m
+
+logger:
+ level: info
+
+prometheus:
+ enabled: true
+ address: localhost:9090
+ shutdown_timeout: 15s
+
+object:
+ put:
+ remote_pool_size: 100
+ local_pool_size: 100
+
+morph:
+ rpc_endpoint:
+ - wss://rpc1.morph.frostfs.info:40341/ws
+ - wss://rpc2.morph.frostfs.info:40341/ws
+ - wss://rpc3.morph.frostfs.info:40341/ws
+ - wss://rpc4.morph.frostfs.info:40341/ws
+ - wss://rpc5.morph.frostfs.info:40341/ws
+ - wss://rpc6.morph.frostfs.info:40341/ws
+ - wss://rpc7.morph.frostfs.info:40341/ws
+ dial_timeout: 20s
+
+contracts:
+ balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
+ container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
+ netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
diff --git a/config/testnet/README.md b/config/testnet/README.md
new file mode 100644
index 000000000..e2cda33ec
--- /dev/null
+++ b/config/testnet/README.md
@@ -0,0 +1,129 @@
+# N3 Testnet Storage node configuration
+
+There is a prepared configuration for NeoFS Storage Node deployment in
+N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
+docker image and run it with docker-compose.
+
+## Build image
+
+The prepared **frostfs-storage-testnet** image is available on Docker Hub.
+However, if you need to rebuild it for some reason, run the
+`make image-storage-testnet` command.
+
+```
+$ make image-storage-testnet
+...
+Successfully built ab0557117b02
+Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
+```
+
+## Deploy node
+
+To run a storage node in the N3 Testnet environment, you should deposit GAS assets,
+update the docker-compose file, and start the node.
+
+### Deposit
+
+The Storage Node owner should deposit GAS to the NeoFS smart contract. This generates a
+bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send the bootstrap tx.
+
+First, obtain GAS in the N3 Testnet chain. You can do that with the
+[faucet](https://neowish.ngd.network) service.
+
+Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet.
+You can provide a scripthash in the `data` argument of the transfer tx to make a
+deposit to a specified account. Otherwise, the deposit is made to the tx sender.
+
+The NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
+so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
+
+See a deposit example with `neo-go`.
+
+```
+neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
+--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
+--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
+--token GAS \
+--amount 1
+```
+
+### Configure
+
+Next, configure the `node_config.env` file. Change the endpoint values. Both
+should contain your **public** IP.
+
+```
+NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
+NEOFS_NODE_ADDRESSES=65.52.183.157:36512
+```
+
+Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
+attribute.
+
+```
+NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
+NEOFS_NODE_ADDRESSES=65.52.183.157:36512
+NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
+```
+
+You can validate the UN/LOCODE attribute against the
+[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
+with frostfs-cli.
+
+```
+$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
+Country: Russia
+Location: Saint Petersburg (ex Leningrad)
+Continent: Europe
+Subdivision: [SPE] Sankt-Peterburg
+Coordinates: 59.53, 30.15
+```
+
+It is recommended to pass the node's key as a file. To do so, convert your wallet
+WIF to a 32-byte hex string (via `frostfs-cli`, for example) and save it to a file.
+
+```
+// Print WIF in a 32-byte hex format
+$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
+PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
+PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
+WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
+Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
+ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
+ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
+
+// Save 32-byte hex into a file
+$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
+```
+
+Then, specify the path to this file in `docker-compose.yml`:
+```yaml
+ volumes:
+ - frostfs_storage:/storage
+ - ./my_wallet.key:/node.key
+```
+
+
+NeoFS objects will be stored on your machine. By default, docker-compose
+is configured to store objects in the named docker volume `frostfs_storage`. You can
+instead specify a directory on the host filesystem to store objects there.
+
+```yaml
+ volumes:
+ - /home/username/frostfs/rc3/storage:/storage
+ - ./my_wallet.key:/node.key
+```
+
+### Start
+
+Run the node with the `docker-compose up` command and stop it with `docker-compose down`.
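+For example (a sketch; `-d` starts the services in the background):
+
+```
+$ docker-compose up -d
+$ docker-compose down
+```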
+
+### Debug
+
+To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
+the log, set the log level to debug with this env:
+
+```yaml
+ environment:
+ - NEOFS_LOGGER_LEVEL=debug
+```
diff --git a/config/testnet/config.yml b/config/testnet/config.yml
new file mode 100644
index 000000000..76b36cdf6
--- /dev/null
+++ b/config/testnet/config.yml
@@ -0,0 +1,52 @@
+logger:
+ level: info
+
+morph:
+ rpc_endpoint:
+ - wss://rpc01.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc02.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc03.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc04.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc05.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc06.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc07.morph.testnet.frostfs.info:51331/ws
+ dial_timeout: 20s
+
+contracts:
+ balance: e0420c216003747626670d1424569c17c79015bf
+ container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
+ netmap: d4b331639799e2958d4bc5b711b469d79de94e01
+
+node:
+ key: /node.key
+ attribute_0: Deployed:SelfHosted
+ attribute_1: User-Agent:FrostFS\/0.9999
+
+prometheus:
+ enabled: true
+ address: localhost:9090
+ shutdown_timeout: 15s
+
+storage:
+ shard_num: 1
+ shard:
+ 0:
+ metabase:
+ path: /storage/metabase
+ perm: 0777
+ blobstor:
+ - path: /storage/path/blobovnicza
+ type: blobovnicza
+ perm: 0600
+ opened_cache_capacity: 32
+ depth: 1
+ width: 1
+ - path: /storage/path/fstree
+ type: fstree
+ perm: 0600
+ depth: 4
+ writecache:
+ enabled: false
+ gc:
+ remover_batch_size: 100
+ remover_sleep_interval: 1m
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 000000000..47328c419
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,5 @@
+frostfs-node (0.0.1) stable; urgency=medium
+
+ * Initial package build
+
+ -- TrueCloudLab <tech@frostfs.info>  Tue, 25 Oct 2022 21:10:49 +0300
diff --git a/debian/clean b/debian/clean
new file mode 100644
index 000000000..44dc05e0a
--- /dev/null
+++ b/debian/clean
@@ -0,0 +1,2 @@
+man/
+debian/*.bash-completion
diff --git a/debian/control b/debian/control
new file mode 100644
index 000000000..f3f214bca
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,39 @@
+Source: frostfs-node
+Section: misc
+Priority: optional
+Maintainer: TrueCloudLab <tech@frostfs.info>
+Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts
+Standards-Version: 4.5.1
+Homepage: https://fs.neo.org/
+Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-node.git
+Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-node
+
+Package: frostfs-storage
+Architecture: any
+Depends: ${misc:Depends}
+Description: FrostFS Storage node
+ FrostFS is a decentralized distributed object storage integrated with the NEO
+ Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
+ of storing and distributing user's data. Any Neo user may participate in the
+ network and get paid for providing storage resources to other users or store
+ their data in FrostFS and pay a competitive price for it.
+
+Package: frostfs-ir
+Architecture: any
+Depends: ${misc:Depends}, frostfs-locode-db
+Description: FrostFS InnerRing node
+ FrostFS is a decentralized distributed object storage integrated with the NEO
+ Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
+ of storing and distributing user's data. Any Neo user may participate in the
+ network and get paid for providing storage resources to other users or store
+ their data in FrostFS and pay a competitive price for it.
+
+Package: frostfs-cli
+Architecture: any
+Depends: ${misc:Depends}
+Description: CLI tools for FrostFS
+ FrostFS is a decentralized distributed object storage integrated with the NEO
+ Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
+ of storing and distributing user's data. Any Neo user may participate in the
+ network and get paid for providing storage resources to other users or store
+ their data in FrostFS and pay a competitive price for it.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 000000000..61dab665d
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,23 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: frostfs-node
+Upstream-Contact: tech@frostfs.info
+Source: https://git.frostfs.info/TrueCloudLab/frostfs-node
+
+Files: *
+Copyright: 2022-2023 TrueCloudLab (@TrueCloudLab), contributors of FrostFS project
+ 2018-2022 NeoSPCC (@nspcc-dev), contributors of NeoFS project
+ (https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/CREDITS.md)
+
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; version 3.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program or at /usr/share/common-licenses/GPL-3
+ If not, see .
diff --git a/debian/frostfs-cli.docs b/debian/frostfs-cli.docs
new file mode 100644
index 000000000..58d4559cc
--- /dev/null
+++ b/debian/frostfs-cli.docs
@@ -0,0 +1,4 @@
+CONTRIBUTING.md
+CREDITS.md
+README.md
+cmd/frostfs-adm/docs
diff --git a/debian/frostfs-cli.install b/debian/frostfs-cli.install
new file mode 100644
index 000000000..93025187b
--- /dev/null
+++ b/debian/frostfs-cli.install
@@ -0,0 +1,3 @@
+bin/frostfs-adm usr/bin
+bin/frostfs-cli usr/bin
+bin/frostfs-lens usr/bin
diff --git a/debian/frostfs-cli.manpages b/debian/frostfs-cli.manpages
new file mode 100644
index 000000000..85c5e001d
--- /dev/null
+++ b/debian/frostfs-cli.manpages
@@ -0,0 +1 @@
+man/*
diff --git a/debian/frostfs-ir.dirs b/debian/frostfs-ir.dirs
new file mode 100644
index 000000000..90da8fd27
--- /dev/null
+++ b/debian/frostfs-ir.dirs
@@ -0,0 +1,2 @@
+/etc/frostfs/ir
+/var/lib/frostfs/ir
diff --git a/debian/frostfs-ir.docs b/debian/frostfs-ir.docs
new file mode 100644
index 000000000..38b0cef26
--- /dev/null
+++ b/debian/frostfs-ir.docs
@@ -0,0 +1,3 @@
+CONTRIBUTING.md
+CREDITS.md
+README.md
diff --git a/debian/frostfs-ir.install b/debian/frostfs-ir.install
new file mode 100644
index 000000000..e052f5434
--- /dev/null
+++ b/debian/frostfs-ir.install
@@ -0,0 +1 @@
+bin/frostfs-ir usr/bin
diff --git a/debian/frostfs-ir.postinst b/debian/frostfs-ir.postinst
new file mode 100755
index 000000000..eb9d381c9
--- /dev/null
+++ b/debian/frostfs-ir.postinst
@@ -0,0 +1,51 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `configure'
+# * `abort-upgrade'
+# * `abort-remove' `in-favour'
+#
+# * `abort-remove'
+# * `abort-deconfigure' `in-favour'
+# `removing'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+case "$1" in
+ configure)
+ USERNAME=ir
+ id -u frostfs-ir >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/ir --system -M -U -c "FrostFS InnerRing node" frostfs-ir
+ if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
+ chmod -f 0750 /etc/frostfs/$USERNAME
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml
+ chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
+ chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
+ fi
+ USERDIR="$(getent passwd frostfs-$USERNAME | cut -d: -f6)"
+ if ! dpkg-statoverride --list "$USERDIR" >/dev/null; then
+ chown -f frostfs-$USERNAME: "$USERDIR"
+ fi
+ ;;
+
+ abort-upgrade|abort-remove|abort-deconfigure)
+ ;;
+
+ *)
+ echo "postinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.postrm b/debian/frostfs-ir.postrm
new file mode 100755
index 000000000..cbb7db2f2
--- /dev/null
+++ b/debian/frostfs-ir.postrm
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `purge'
+# * `upgrade'
+# * `failed-upgrade'
+# * `abort-install'
+# * `abort-install'
+# * `abort-upgrade'
+# * `disappear'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ purge)
+ rm -rf /var/lib/frostfs/ir/*
+ ;;
+
+ remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+ ;;
+
+ *)
+ echo "postrm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.preinst b/debian/frostfs-ir.preinst
new file mode 100755
index 000000000..37f952537
--- /dev/null
+++ b/debian/frostfs-ir.preinst
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `install'
+# * `install'
+# * `upgrade'
+# * `abort-upgrade'
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.prerm b/debian/frostfs-ir.prerm
new file mode 100755
index 000000000..0da369d75
--- /dev/null
+++ b/debian/frostfs-ir.prerm
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `upgrade'
+# * `failed-upgrade'
+# * `remove' `in-favour'
+# * `deconfigure' `in-favour'
+# `removing'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ remove|upgrade|deconfigure)
+ ;;
+
+ failed-upgrade)
+ ;;
+
+ *)
+ echo "prerm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.service b/debian/frostfs-ir.service
new file mode 100644
index 000000000..304017f68
--- /dev/null
+++ b/debian/frostfs-ir.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=FrostFS InnerRing node
+Requires=network.target
+
+[Service]
+Type=notify
+NotifyAccess=all
+ExecStart=/usr/bin/frostfs-ir --config /etc/frostfs/ir/config.yml
+User=frostfs-ir
+Group=frostfs-ir
+WorkingDirectory=/var/lib/frostfs/ir
+Restart=always
+RestartSec=5
+PrivateTmp=true
+
+[Install]
+WantedBy=multi-user.target
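
Note: `debian/rules` further down in this diff installs the units with `dh_installsystemd --no-enable --no-start`, so the service does not start by itself after package installation. A minimal sketch of bringing it up manually with standard systemd commands:

```shell
$ sudo systemctl enable --now frostfs-ir.service
$ systemctl status frostfs-ir.service
```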
diff --git a/debian/frostfs-storage.dirs b/debian/frostfs-storage.dirs
new file mode 100644
index 000000000..4142145ee
--- /dev/null
+++ b/debian/frostfs-storage.dirs
@@ -0,0 +1,3 @@
+/etc/frostfs/storage
+/srv/frostfs
+/var/lib/frostfs/storage
diff --git a/debian/frostfs-storage.docs b/debian/frostfs-storage.docs
new file mode 100644
index 000000000..cd1f5f23f
--- /dev/null
+++ b/debian/frostfs-storage.docs
@@ -0,0 +1,4 @@
+docs/storage-node-configuration.md
+CONTRIBUTING.md
+CREDITS.md
+README.md
diff --git a/debian/frostfs-storage.install b/debian/frostfs-storage.install
new file mode 100644
index 000000000..670935e7b
--- /dev/null
+++ b/debian/frostfs-storage.install
@@ -0,0 +1 @@
+bin/frostfs-node usr/bin
diff --git a/debian/frostfs-storage.postinst b/debian/frostfs-storage.postinst
new file mode 100755
index 000000000..88fa53be5
--- /dev/null
+++ b/debian/frostfs-storage.postinst
@@ -0,0 +1,55 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `configure'
+# * `abort-upgrade'
+# * `abort-remove' `in-favour'
+#
+# * `abort-remove'
+# * `abort-deconfigure' `in-favour'
+# `removing'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+case "$1" in
+ configure)
+ USERNAME=storage
+ id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -M -U -c "FrostFS Storage node" frostfs-$USERNAME
+ if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
+ chmod -f 0750 /etc/frostfs/$USERNAME
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml
+ chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
+ chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
+ fi
+ USERDIR=$(getent passwd frostfs-$USERNAME | cut -d: -f6)
+ if ! dpkg-statoverride --list "$USERDIR" >/dev/null; then
+ chown -f frostfs-$USERNAME: "$USERDIR"
+ fi
+ USERDIR=/srv/frostfs
+ if ! dpkg-statoverride --list $USERDIR >/dev/null; then
+ chown -f frostfs-$USERNAME: $USERDIR
+ fi
+ ;;
+
+ abort-upgrade|abort-remove|abort-deconfigure)
+ ;;
+
+ *)
+ echo "postinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.postrm b/debian/frostfs-storage.postrm
new file mode 100755
index 000000000..d9c8c9656
--- /dev/null
+++ b/debian/frostfs-storage.postrm
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `purge'
+# * `upgrade'
+# * `failed-upgrade'
+# * `abort-install'
+# * `abort-install'
+# * `abort-upgrade'
+# * `disappear'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ purge)
+ rm -rf /var/lib/frostfs/storage/*
+ ;;
+
+ remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+ ;;
+
+ *)
+ echo "postrm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.preinst b/debian/frostfs-storage.preinst
new file mode 100755
index 000000000..37f952537
--- /dev/null
+++ b/debian/frostfs-storage.preinst
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `install'
+# * `install'
+# * `upgrade'
+# * `abort-upgrade'
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.prerm b/debian/frostfs-storage.prerm
new file mode 100755
index 000000000..0da369d75
--- /dev/null
+++ b/debian/frostfs-storage.prerm
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `upgrade'
+# * `failed-upgrade'
+# * `remove' `in-favour'
+# * `deconfigure' `in-favour'
+# `removing'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ remove|upgrade|deconfigure)
+ ;;
+
+ failed-upgrade)
+ ;;
+
+ *)
+ echo "prerm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.service b/debian/frostfs-storage.service
new file mode 100644
index 000000000..573961756
--- /dev/null
+++ b/debian/frostfs-storage.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=FrostFS Storage node
+Requires=network.target
+
+[Service]
+Type=notify
+NotifyAccess=all
+ExecStart=/usr/bin/frostfs-node --config /etc/frostfs/storage/config.yml
+User=frostfs-storage
+Group=frostfs-storage
+WorkingDirectory=/srv/frostfs
+Restart=always
+RestartSec=5
+PrivateTmp=true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 000000000..0dd8ee399
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,40 @@
+#!/usr/bin/make -f
+
+# Do not try to strip Go binaries
+export DEB_BUILD_OPTIONS := nostrip
+
+%:
+ dh $@ --with bash-completion
+
+override_dh_auto_test:
+
+override_dh_auto_install:
+ echo $(DEB_BUILD_OPTIONS)
+ dh_auto_install
+
+ bin/frostfs-adm gendoc --type man man/
+ bin/frostfs-cli gendoc --type man man/
+
+ bin/frostfs-adm completion bash > debian/frostfs-adm.bash-completion
+ bin/frostfs-cli completion bash > debian/frostfs-cli.bash-completion
+ install -m 0755 -d debian/frostfs-cli/usr/share/fish/completions/
+ install -m 0755 -d debian/frostfs-cli/usr/share/zsh/vendor-completions/
+ bin/frostfs-adm completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-adm.fish
+ bin/frostfs-adm completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-adm
+ bin/frostfs-cli completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-cli.fish
+ bin/frostfs-cli completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-cli
+
+ install -T -m 0640 config/example/ir.yaml debian/frostfs-ir/etc/frostfs/ir/config.yml
+ install -T -m 0640 config/example/ir-control.yaml debian/frostfs-ir/etc/frostfs/ir/control.yml
+ install -T -m 0640 config/example/node.yaml debian/frostfs-storage/etc/frostfs/storage/config.yml
+ install -T -m 0640 config/example/node-control.yaml debian/frostfs-storage/etc/frostfs/storage/control.yml
+
+override_dh_installsystemd:
+ dh_installsystemd --no-enable --no-start --name=frostfs-ir
+ dh_installsystemd --no-enable --no-start --name=frostfs-storage
+
+override_dh_installchangelogs:
+ dh_installchangelogs -k CHANGELOG.md
+
+override_dh_installdocs:
+ dh_installdocs
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 000000000..163aaf8d8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json
index b68ce4fa3..990fd42a8 100644
--- a/dev/.vscode-example/launch.json
+++ b/dev/.vscode-example/launch.json
@@ -42,6 +42,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
+ "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
@@ -77,12 +78,7 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9090",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8080"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
},
"postDebugTask": "env-down"
},
@@ -97,6 +93,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
+ "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
@@ -132,12 +129,7 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9091",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8082"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
},
"postDebugTask": "env-down"
},
@@ -152,6 +144,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
+ "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
@@ -187,12 +180,7 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9092",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8084"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
},
"postDebugTask": "env-down"
},
@@ -207,6 +195,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
+ "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",
@@ -242,12 +231,7 @@
"FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama1",
"FROSTFS_PROMETHEUS_ENABLED":"true",
"FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9093",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8086"
+ "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s"
},
"postDebugTask": "env-down"
}
diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml
index 40ed35aeb..9d026797c 100644
--- a/dev/docker-compose.yml
+++ b/dev/docker-compose.yml
@@ -3,7 +3,7 @@
version: "2.4"
services:
neo-go:
- image: nspccdev/neo-go:0.106.0
+ image: nspccdev/neo-go:0.105.0
container_name: neo-go
command: ["node", "--config-path", "/config", "--privnet", "--debug"]
stop_signal: SIGKILL
@@ -14,15 +14,3 @@ services:
- ./neo-go/node-wallet.json:/wallets/node-wallet.json
- ./neo-go/config.yml:/wallets/config.yml
- ./neo-go/wallet.json:/wallets/wallet.json
- jaeger:
- image: jaegertracing/all-in-one:latest
- container_name: jaeger
- ports:
- - '4317:4317' #OTLP over gRPC
- - '4318:4318' #OTLP over HTTP
- - '16686:16686' #frontend
- stop_signal: SIGKILL
- environment:
- - COLLECTOR_OTLP_ENABLED=true
- - SPAN_STORAGE_TYPE=badger
- - BADGER_EPHEMERAL=true
diff --git a/docs/building-deb-package.md b/docs/building-deb-package.md
new file mode 100644
index 000000000..26a77a27f
--- /dev/null
+++ b/docs/building-deb-package.md
@@ -0,0 +1,46 @@
+# Building Debian package on host
+
+## Prerequisites
+
+For now, we assume building for Debian 11 (stable) on x86_64.
+
+Go version 1.18.4 or later should already be installed, i.e. this runs
+successfully:
+
+* `make all`
+
+## Installing packaging dependencies
+
+```shell
+$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
+```
+
+Warning: the number of packages installed is pretty large considering dependencies.
+
+## Package building
+
+```shell
+$ make debpackage
+```
+
+## Leftovers cleaning
+
+```shell
+$ make debclean
+```
+or
+```shell
+$ dh clean
+```
+
+# Package versioning
+
+By default, the package version is based on the product version and may also
+contain git tags and hashes.
+
+The package version can be overridden by setting the `PKG_VERSION` variable
+before the build; Debian package versioning rules should be respected.
+
+```shell
+$ PKG_VERSION=0.32.0 make debpackage
+```
diff --git a/docs/evacuation.md b/docs/evacuation.md
index d47d56d15..885ce169a 100644
--- a/docs/evacuation.md
+++ b/docs/evacuation.md
@@ -20,12 +20,7 @@ Because it is necessary to prevent removing by policer objects with policy `REP
## Commands
-`frostfs-cli control shards evacuation start` starts the evacuation process for the specified shards. To start evacuating all node shards, use the `--all` flag.
-By default, objects and trees are evacuated. To limit the evacuation scope, use the `--scope` flag (possible values are `all`, `trees`, `objects`).
-To evacuate objects only from containers with policy `REP 1`, use the `--rep-one-only` option.
-To adjust the resource consumption required for evacuation, use the options:
- - `--container-worker-count` the number of concurrent container evacuation workers
- - `--object-worker-count` the number of concurrent object evacuation workers
+`frostfs-cli control shards evacuation start` starts the evacuation process for the specified shards. To start evacuating all node shards, use the `--all` flag. By default, objects and trees are evacuated. To limit the evacuation scope, use the `--scope` flag (possible values are `all`, `trees`, `objects`).
`frostfs-cli control shards evacuation stop` stops the running evacuation process.
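
A hypothetical invocation tying these commands together (the control endpoint address and wallet path are placeholders, passed via the usual `--endpoint`/`--wallet` flags):

```shell
# Evacuate objects and trees from all shards of the node.
$ frostfs-cli control shards evacuation start --all --scope all \
    --endpoint 127.0.0.1:8091 --wallet /path/to/wallet.json

# Abort the evacuation if it is still in progress.
$ frostfs-cli control shards evacuation stop \
    --endpoint 127.0.0.1:8091 --wallet /path/to/wallet.json
```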
diff --git a/docs/release-instruction.md b/docs/release-instruction.md
index aa867e83c..3aebc8e66 100644
--- a/docs/release-instruction.md
+++ b/docs/release-instruction.md
@@ -43,6 +43,11 @@ Write new revision number into the root `VERSION` file:
$ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION
```
+Update the version in the Debian package changelog file and check the result:
+```
+$ cat debian/changelog
+```
+
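`devscripts` is already among the package's Build-Depends, so one convenient way to add the new changelog entry is `dch`; a sketch, with an illustrative version string:

```shell
$ dch --newversion 0.32.0 --distribution stable "Release 0.32.0"
```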
Update the supported version of `TrueCloudLab/frostfs-contract` module in root
`README.md` if needed.
@@ -55,7 +60,7 @@ Add an entry to the `CHANGELOG.md` following the style established there.
* update `Unreleased...new` and `new...old` diff-links at the bottom of the file
* add optional codename and release date in the heading
* remove all empty sections such as `Added`, `Removed`, etc.
-* make sure all changes have references to relevant issues in `#123` format (if possible)
+* make sure all changes have references to GitHub issues in `#123` format (if possible)
* clean up all `Unreleased` sections and leave them empty
### Make release commit
@@ -95,20 +100,24 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release
-### Prepare and push images to a Docker registry (automated)
+### Prepare and push images to Docker Hub (if not automated)
-Create Docker images for all applications and push them into container registry
-(executed automatically in Forgejo Actions upon pushing a release tag):
+Create Docker images for all applications and push them to Docker Hub
+(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images
-$ make push-images
+$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
```
-### Make a proper release (if not automated)
+### Make a proper GitHub release (if not automated)
-Edit an automatically-created release on git.frostfs.info, copy things from `CHANGELOG.md`.
+Edit an automatically-created release on GitHub, copy things from `CHANGELOG.md`.
Build and tar release binaries with `make prepare-release`, attach them to
the release. Publish the release.
@@ -117,7 +126,7 @@ the release. Publish the release.
Prepare pull-request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
with new versions.
-### Close milestone
+### Close GitHub milestone
Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
diff --git a/docs/shard-modes.md b/docs/shard-modes.md
index 6cc4ab13c..3b459335b 100644
--- a/docs/shard-modes.md
+++ b/docs/shard-modes.md
@@ -51,7 +51,10 @@ However, all mode changing operations are idempotent.
## Automatic mode changes
-A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
+A shard can automatically switch to `degraded-read-only` mode in 3 cases:
+1. If the metabase was not available or couldn't be opened/initialized during shard startup.
+2. If the shard error counter exceeds the threshold.
+3. If the metabase couldn't be reopened during SIGHUP handling.
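
A shard switched to `degraded-read-only` this way has to be returned to service explicitly once the cause is fixed; a sketch using the control API (shard ID, endpoint and wallet are placeholders):

```shell
$ frostfs-cli control shards set-mode --mode read-write --id <shard_id> \
    --endpoint 127.0.0.1:8091 --wallet /path/to/wallet.json
```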
# Detach shard
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index da9fdfed0..c74695e2b 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -12,23 +12,21 @@ There are some custom types used for brevity:
# Structure
-| Section | Description |
-|--------------|---------------------------------------------------------|
-| `node` | [Node parameters](#node-section) |
-| `logger` | [Logging parameters](#logger-section) |
-| `pprof` | [PProf configuration](#pprof-section) |
-| `prometheus` | [Prometheus metrics configuration](#prometheus-section) |
-| `control` | [Control service configuration](#control-section) |
-| `contracts` | [Override FrostFS contracts hashes](#contracts-section) |
-| `morph` | [N3 blockchain client configuration](#morph-section) |
-| `apiclient` | [FrostFS API client configuration](#apiclient-section) |
-| `policer` | [Policer service configuration](#policer-section) |
-| `replicator` | [Replicator service configuration](#replicator-section) |
-| `storage` | [Storage engine configuration](#storage-section) |
-| `runtime` | [Runtime configuration](#runtime-section) |
-| `audit` | [Audit configuration](#audit-section) |
-| `multinet` | [Multinet configuration](#multinet-section) |
-| `qos` | [QoS configuration](#qos-section) |
+| Section | Description |
+|------------------------|---------------------------------------------------------------------|
+| `logger` | [Logging parameters](#logger-section) |
+| `pprof` | [PProf configuration](#pprof-section) |
+| `prometheus` | [Prometheus metrics configuration](#prometheus-section) |
+| `control` | [Control service configuration](#control-section) |
+| `contracts` | [Override FrostFS contracts hashes](#contracts-section) |
+| `morph` | [N3 blockchain client configuration](#morph-section) |
+| `apiclient` | [FrostFS API client configuration](#apiclient-section) |
+| `policer` | [Policer service configuration](#policer-section) |
+| `replicator` | [Replicator service configuration](#replicator-section) |
+| `storage` | [Storage engine configuration](#storage-section) |
+| `runtime` | [Runtime configuration](#runtime-section) |
+| `audit` | [Audit configuration](#audit-section) |
+
# `control` section
```yaml
@@ -112,21 +110,11 @@ Contains logger parameters.
```yaml
logger:
level: info
- tags:
- - names: "main, morph"
- level: debug
```
-| Parameter | Type | Default value | Description |
-|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------|
-| `level` | `string` | `info` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
-| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tag descriptions. |
-
-## `tags` subsection
-| Parameter | Type | Default value | Description |
-|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `names` | `string` | | List of components divided by `,`.<br/>Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. |
-| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. |
+| Parameter | Type | Default value | Description |
+|-----------|----------|---------------|---------------------------------------------------------------------------------------------------|
+| `level` | `string` | `info` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
# `contracts` section
Contains override values for FrostFS side-chain contract hashes. Most of the time contract
@@ -159,19 +147,15 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
- netmap:
- candidates:
- poll_interval: 20s
```
-| Parameter | Type | Default value | Description |
-|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
-| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
-| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
-| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
-| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
-| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval at which netmap candidates are polled and merged with the netmap in the local cache. |
+| Parameter | Type | Default value | Description |
+| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
+| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
+| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
+| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
+| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@@ -185,6 +169,7 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
+| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the number of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
@@ -195,41 +180,20 @@ Contains configuration for each shard. Keys must be consecutive numbers starting
`default` subsection has the same format and specifies defaults for missing values.
The following table describes configuration for each shard.
-| Parameter | Type | Default value | Description |
-| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- |
-| `compression` | [Compression config](#compression-subsection) | | Compression config. |
-| `mode` | `string` | `read-write` | Shard Mode.<br/>Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
-| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
-| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
-| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
-| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
-| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
-| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
-| `gc` | [GC config](#gc-subsection) | | GC configuration. |
-| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
-
-### `compression` subsection
-
-Contains compression config.
-
-```yaml
-compression:
- enabled: true
- level: smallest_size
- exclude_content_types:
- - audio/*
- - video/*
- estimate_compressibility: true
- estimate_compressibility_threshold: 0.7
-```
-
-| Parameter | Type | Default value | Description |
-| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `enabled` | `bool` | `false` | Flag to enable compression. |
-| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. |
-| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
-| `estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data or not. |
-| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data will be compressed if the estimate is greater than this value. |
+| Parameter | Type | Default value | Description |
+| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `compress` | `bool` | `false` | Flag to enable compression. |
+| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
+| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data or not. |
+| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data will be compressed if the estimate is greater than this value. |
+| `mode` | `string` | `read-write` | Shard Mode.<br/>Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
+| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
+| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
+| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
+| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
+| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
+| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
+| `gc` | [GC config](#gc-subsection) | | GC configuration. |
### `blobstor` subsection
@@ -244,7 +208,7 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
- perm: 0o644
+ perm: 0644
size: 4194304
depth: 1
width: 4
@@ -304,7 +268,7 @@ gc:
```yaml
metabase:
path: /path/to/meta.db
- perm: 0o644
+ perm: 0644
max_batch_size: 200
max_batch_delay: 20ms
```
@@ -336,65 +300,6 @@ writecache:
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
-### `limits` subsection
-
-```yaml
-limits:
- max_read_running_ops: 10000
- max_read_waiting_ops: 1000
- max_write_running_ops: 1000
- max_write_waiting_ops: 100
- read:
- - tag: internal
- weight: 20
- limit_ops: 0
- reserved_ops: 1000
- - tag: client
- weight: 70
- reserved_ops: 10000
- - tag: background
- weight: 5
- limit_ops: 10000
- reserved_ops: 0
- - tag: writecache
- weight: 5
- limit_ops: 25000
- - tag: policer
- weight: 5
- limit_ops: 25000
- write:
- - tag: internal
- weight: 200
- limit_ops: 0
- reserved_ops: 100
- - tag: client
- weight: 700
- reserved_ops: 1000
- - tag: background
- weight: 50
- limit_ops: 1000
- reserved_ops: 0
- - tag: writecache
- weight: 50
- limit_ops: 2500
- - tag: policer
- weight: 50
- limit_ops: 2500
-```
-
-| Parameter | Type | Default value | Description |
-| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
-| `max_read_running_ops`  | `int`    | 0 (no limit)   | The maximum number of running read operations.                                                                    |
-| `max_read_waiting_ops`  | `int`    | 0 (no limit)   | The maximum number of waiting read operations.                                                                    |
-| `max_write_running_ops` | `int`    | 0 (no limit)   | The maximum number of running write operations.                                                                   |
-| `max_write_waiting_ops` | `int`    | 0 (no limit)   | The maximum number of waiting write operations.                                                                   |
-| `read` | `[]tag` | empty | Array of shard read settings for tags. |
-| `write` | `[]tag` | empty | Array of shard write settings for tags. |
-| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
-| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. |
-| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
-| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
-| `tag.prohibited`        | `bool`   | false          | If true, operations with the specified tag will be prohibited.                                                   |
# `node` section
@@ -410,22 +315,22 @@ node:
- "Price:11"
- "UN-LOCODE:RU MSK"
- "key:value"
+ relay: false
persistent_sessions:
path: /sessions
persistent_state:
path: /state
- locode_db_path: "/path/to/locode/db"
```
-| Parameter | Type | Default value | Description |
-|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------|
-| `key` | `string` | | Path to the binary-encoded private key. |
-| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
-| `addresses` | `[]string` | | Addresses advertised in the netmap. |
-| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
-| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
-| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
-| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. |
+| Parameter | Type | Default value | Description |
+|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------|
+| `key` | `string` | | Path to the binary-encoded private key. |
+| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
+| `addresses` | `[]string` | | Addresses advertised in the netmap. |
+| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
+| `relay` | `bool` | | Enable relay mode. |
+| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
+| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
## `wallet` subsection
N3 wallet configuration.
@@ -490,46 +395,25 @@ replicator:
pool_size: 10
```
-| Parameter | Type | Default value | Description |
-|---------------|------------|---------------|---------------------------------------------|
-| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
-| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. |
+| Parameter | Type | Default value | Description |
+|---------------|------------|----------------------------------------|---------------------------------------------|
+| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
+| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. |
# `object` section
Contains object-service related parameters.
```yaml
object:
- get:
- priority:
- - $attribute:ClusterName
+ put:
+ remote_pool_size: 100
```
-| Parameter | Type | Default value | Description |
-|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------|
-| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
-| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. |
-
-
-# `rpc` section
-Contains limits on the number of active RPC for specified method(s).
-
-```yaml
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- max_ops: 1000
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
-```
-
-| Parameter | Type | Default value | Description |
-|------------------|------------|---------------|--------------------------------------------------------------|
-| `limits.max_ops` | `int` | | Maximum number of active RPC allowed for the given method(s) |
-| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit |
+| Parameter | Type | Default value | Description |
+|-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------|
+| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
+| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
+| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
# `runtime` section
Contains runtime parameters.
@@ -551,52 +435,6 @@ audit:
enabled: true
```
-| Parameter | Type | Default value | Description |
-|-----------|--------|---------------|---------------------------------------------------|
-| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. |
-
-
-# `multinet` section
-Contains multinet parameters.
-
-```yaml
-multinet:
- enabled: true
- subnets:
- - mask: 192.168.219.174/24
- source_ips:
- - 192.168.218.185
- - 192.168.219.185
- - mask: 10.78.70.74/24
- source_ips:
- - 10.78.70.185
- - 10.78.71.185
- balancer: roundrobin
- restrict: false
- fallback_delay: 350ms
-```
-
-| Parameter | Type | Default value | Description |
-| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- |
-| `enabled` | `bool` | false | If `true` then source-based routing is enabled. |
-| `subnets` | `subnet` | empty | Resulting subnets. |
-| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
-| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
-| `fallback_delay` | `duration` | 350ms         | Delay before falling back to secondary IP addresses when resolving a hostname.                                              |
-
-# `qos` section
-```yaml
-qos:
- critical:
- authorized_keys:
- - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
- internal:
- authorized_keys:
- - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
-```
-| Parameter | Type | Default value | Description |
-| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- |
-| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
-| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |
+| Parameter | Type | Default value | Description |
+|---------------------|--------|---------------|---------------------------------------------------|
+| `enabled`           | `bool` | false         | If `true` then audit event logs will be recorded. |
diff --git a/docs/update-go-instruction.md b/docs/update-go-instruction.md
index 195e0c6b3..f99225046 100644
--- a/docs/update-go-instruction.md
+++ b/docs/update-go-instruction.md
@@ -7,7 +7,7 @@
## Update CI
Change Golang versions for unit test in CI.
-There is a `go` section in the `.forgejo/workflows/*.yml` files:
+There is a `go` section in the `.github/workflows/go.yaml` file:
```yaml
jobs:
test:
diff --git a/go.mod b/go.mod
index 6f1950936..621d2e85d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,25 +1,23 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
-go 1.23.0
+go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
- git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
- git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
- git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa
+ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
+ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
git.frostfs.info/TrueCloudLab/hrw v1.2.1
- git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
- git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991
+ git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/VictoriaMetrics/easyproto v0.1.4
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
- github.com/felixge/fgprof v0.9.5
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
github.com/gdamore/tcell/v2 v2.7.4
github.com/go-pkgz/expirable-cache/v3 v3.0.0
@@ -27,9 +25,10 @@ require (
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/klauspost/compress v1.17.4
github.com/mailru/easyjson v0.7.7
+ github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
- github.com/multiformats/go-multiaddr v0.15.0
- github.com/nspcc-dev/neo-go v0.106.3
+ github.com/multiformats/go-multiaddr v0.12.1
+ github.com/nspcc-dev/neo-go v0.106.2
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.0
@@ -41,14 +40,15 @@ require (
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
- go.opentelemetry.io/otel v1.31.0
- go.opentelemetry.io/otel/trace v1.31.0
+ go.opentelemetry.io/otel v1.28.0
+ go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0
- golang.org/x/sync v0.12.0
- golang.org/x/sys v0.31.0
- golang.org/x/term v0.30.0
- google.golang.org/grpc v1.69.2
- google.golang.org/protobuf v1.36.1
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+ golang.org/x/sync v0.7.0
+ golang.org/x/sys v0.22.0
+ golang.org/x/term v0.21.0
+ google.golang.org/grpc v1.66.2
+ google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
)
@@ -60,7 +60,7 @@ require (
require (
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -76,7 +76,6 @@ require (
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
- github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
@@ -85,9 +84,9 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/ipfs/go-cid v0.5.0 // indirect
+ github.com/ipfs/go-cid v0.4.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/klauspost/reedsolomon v1.12.1 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -101,7 +100,7 @@ require (
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
- github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect
+ github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -119,19 +118,18 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
- go.opentelemetry.io/otel/metric v1.31.0 // indirect
- go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+ go.opentelemetry.io/otel/metric v1.28.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.36.0 // indirect
- golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
- golang.org/x/net v0.30.0 // indirect
- golang.org/x/text v0.23.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
+ golang.org/x/crypto v0.24.0 // indirect
+ golang.org/x/net v0.26.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- lukechampine.com/blake3 v1.4.0 // indirect
+ lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
-replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07
+replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928
diff --git a/go.sum b/go.sum
index 5b075f60a..4d21d9bca 100644
--- a/go.sum
+++ b/go.sum
@@ -1,25 +1,23 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
-git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
-git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
-git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
-git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
@@ -29,8 +27,8 @@ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
-github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
-github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
@@ -41,9 +39,6 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
-github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
-github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
-github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -71,8 +66,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
-github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
-github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
@@ -96,9 +89,6 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw=
github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
-github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -108,8 +98,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -119,8 +107,6 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -142,17 +128,16 @@ github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
-github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
-github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
-github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q=
github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -161,7 +146,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
-github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
@@ -181,6 +165,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
@@ -192,8 +178,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo=
-github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk=
+github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
@@ -202,8 +188,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
@@ -222,7 +208,6 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo=
github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
@@ -271,7 +256,6 @@ github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4J
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -294,22 +278,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
-go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
-go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
-go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
-go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
-go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
-go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
-go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
-go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
-go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -324,15 +306,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
-golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
+golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -345,16 +327,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -377,20 +359,19 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -398,26 +379,26 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
-golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
-google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
-google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
-google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
+google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -426,8 +407,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -445,7 +426,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
-lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
+lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
+lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
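Reviewer note on the go.sum block above: each `h1:` value is a dirhash digest of the module zip, so downgraded entries should be regenerated with `go mod tidy`, never edited by hand. A minimal sketch of how such a digest is derived (the zip path is a placeholder for a file from the local module cache, e.g. under `$GOMODCACHE/cache/download`):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashZip with dirhash.Hash1 produces exactly the "h1:...=" strings
	// recorded in go.sum; the toolchain recomputes and compares them on
	// every module download.
	h, err := dirhash.HashZip("/path/to/module@version.zip", dirhash.Hash1)
	if err != nil {
		panic(err)
	}
	fmt.Println(h)
}
```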
diff --git a/pkg/util/ape/converter.go b/internal/ape/converter.go
similarity index 99%
rename from pkg/util/ape/converter.go
rename to internal/ape/converter.go
index c706cf052..eb80e7ded 100644
--- a/pkg/util/ape/converter.go
+++ b/internal/ape/converter.go
@@ -4,7 +4,7 @@ import (
"encoding/hex"
"fmt"
- v2acl "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
+ v2acl "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
diff --git a/pkg/util/ape/converter_test.go b/internal/ape/converter_test.go
similarity index 100%
rename from pkg/util/ape/converter_test.go
rename to internal/ape/converter_test.go
diff --git a/internal/assert/cond.go b/internal/assert/cond.go
deleted file mode 100644
index 113d2eba9..000000000
--- a/internal/assert/cond.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package assert
-
-import (
- "fmt"
- "strings"
-)
-
-func True(cond bool, details ...string) {
- if !cond {
- panic(strings.Join(details, " "))
- }
-}
-
-func False(cond bool, details ...string) {
- if cond {
- panic(strings.Join(details, " "))
- }
-}
-
-func NoError(err error, details ...string) {
- if err != nil {
- content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
- panic(content)
- }
-}
-
-func Fail(details ...string) {
- panic(strings.Join(details, " "))
-}
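Reviewer note: with `internal/assert` deleted above, any remaining invariant checks have to be inlined again. A hedged sketch of the equivalent inline guard (the call-site detail is hypothetical; the "BUG:" prefix matches the removed `NoError` helper):

```go
package main

import "fmt"

func main() {
	var err error // stands in for a real operation result
	// Inline replacement for the removed assert.NoError: panic with the
	// same "BUG:" prefix the helper used, so such failures stay greppable.
	if err != nil {
		panic(fmt.Sprintf("BUG: %v: %s", err, "closing shard component"))
	}
	fmt.Println("ok")
}
```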
diff --git a/internal/audit/request.go b/internal/audit/request.go
index 17666ab4b..cf0797300 100644
--- a/internal/audit/request.go
+++ b/internal/audit/request.go
@@ -1,12 +1,10 @@
package audit
import (
- "context"
-
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
@@ -19,15 +17,15 @@ type Target interface {
String() string
}
-func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
+func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) {
var key []byte
if req != nil {
key = req.GetVerificationHeader().GetBodySignature().GetKey()
}
- LogRequestWithKey(ctx, log, operation, key, target, status)
+ LogRequestWithKey(log, operation, key, target, status)
}
-func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
+func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) {
object, subject := NotDefined, NotDefined
publicKey := crypto.UnmarshalPublicKey(key)
@@ -39,7 +37,7 @@ func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string
object = target.String()
}
- log.Info(ctx, logs.AuditEventLogRecord,
+ log.Info(logs.AuditEventLogRecord,
zap.String("operation", operation),
zap.String("object", object),
zap.String("subject", subject),
diff --git a/internal/audit/target.go b/internal/audit/target.go
index 2d6881e29..8bc87ee8e 100644
--- a/internal/audit/target.go
+++ b/internal/audit/target.go
@@ -3,7 +3,7 @@ package audit
import (
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 626372f43..7aef6873e 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -14,9 +14,13 @@ const (
InterruptPlacementIterationByContext = "interrupt placement iteration by context"
Notification = "notification"
+
+ SkipDeprecatedNotification = "skip deprecated notification"
)
const (
+ InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations"
+ InnerringCantStopEpochEstimation = "can't stop epoch estimation"
InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
@@ -37,6 +41,8 @@ const (
InnerringCantUpdatePersistentState = "can't update persistent state"
InnerringCloserError = "closer error"
InnerringReadConfigFromBlockchain = "read config from blockchain"
+ NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
+ NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
PolicerCouldNotGetContainer = "could not get container"
PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
@@ -55,6 +61,7 @@ const (
ReplicatorCouldNotReplicateObject = "could not replicate object"
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
TreeRedirectingTreeServiceQuery = "redirecting tree service query"
+ TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
TreeSynchronizeTree = "synchronize tree"
@@ -100,6 +107,7 @@ const (
GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed"
GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
+ GetUnableToGetECObjectContainer = "unable to get container for erasure-coded object"
GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
@@ -125,6 +133,7 @@ const (
SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
SearchLocalOperationFailed = "local operation failed"
UtilObjectServiceError = "object service error"
+ UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
@@ -139,12 +148,14 @@ const (
ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked"
ClientNotaryRequestInvoked = "notary request invoked"
ClientNotaryDepositTransactionWasSuccessfullyPersisted = "notary deposit transaction was successfully persisted"
+ ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted = "attempt to wait for notary deposit transaction to get persisted"
ClientNeoClientInvoke = "neo client invoke"
ClientNativeGasTransferInvoke = "native gas transfer invoke"
ClientBatchGasTransferInvoke = "batch gas transfer invoke"
ClientCantGetBlockchainHeight = "can't get blockchain height"
ClientCantGetBlockchainHeight243 = "can't get blockchain height"
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
+ EventCouldNotStartListenToEvents = "could not start listen to events"
EventStopEventListenerByError = "stop event listener by error"
EventStopEventListenerByContext = "stop event listener by context"
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
@@ -162,9 +173,17 @@ const (
EventNotaryParserNotSet = "notary parser not set"
EventCouldNotParseNotaryEvent = "could not parse notary event"
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
+ EventIgnoreNilEventParser = "ignore nil event parser"
+ EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
EventRegisteredNewEventParser = "registered new event parser"
+ EventIgnoreNilEventHandler = "ignore nil event handler"
+ EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
EventRegisteredNewEventHandler = "registered new event handler"
+ EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
+ EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
+ EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
+ EventIgnoreNilBlockHandler = "ignore nil block handler"
StorageOperation = "local object storage operation"
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
BlobovniczaOpeningBoltDB = "opening BoltDB"
@@ -198,7 +217,6 @@ const (
EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks"
EngineInterruptGettingLockers = "can't get object's lockers"
EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks"
- EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones"
EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only"
EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode"
EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold"
@@ -208,6 +226,12 @@ const (
EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
+ MetabaseMissingMatcher = "missing matcher"
+ MetabaseErrorInFKBTSelection = "error in FKBT selection"
+ MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf"
+ MetabaseUnknownOperation = "unknown operation"
+ MetabaseCantIterateOverTheBucket = "can't iterate over the bucket"
+ MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
MetabaseCheckingMetabaseVersion = "checking metabase version"
@@ -225,7 +249,6 @@ const (
ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
ShardCouldNotUnmarshalObject = "could not unmarshal object"
- ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
ShardCouldNotCloseShardComponent = "could not close shard component"
ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
@@ -253,8 +276,9 @@ const (
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
- ShardCouldNotFindObject = "could not find object"
+ WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
+ WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
@@ -289,6 +313,9 @@ const (
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
ContainerDeleteContainerCheckFailed = "delete container check failed"
ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
+ ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
+ ContainerSetEACLCheckFailed = "set EACL check failed"
+ ContainerCouldNotApproveSetEACL = "could not approve set EACL"
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
FrostFSCantRelaySetConfigEvent = "can't relay set config event"
FrostFSFrostfsWorkerPool = "frostfs worker pool"
@@ -333,6 +360,7 @@ const (
NetmapCantGetTransactionHeight = "can't get transaction height"
NetmapCantResetEpochTimer = "can't reset epoch timer"
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
+ NetmapCantStartContainerSizeEstimation = "can't start container size estimation"
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
NetmapNextEpoch = "next epoch"
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
@@ -384,6 +412,7 @@ const (
FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
FrostFSNodeConfigurationReading = "configuration reading"
+ FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
@@ -392,6 +421,11 @@ const (
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
+ FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
+ FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
+ FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
+ FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
+ FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
FrostFSNodeFailedInitTracing = "failed init tracing"
@@ -435,6 +469,7 @@ const (
FSTreeCantUnmarshalObject = "can't unmarshal an object"
FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
FSTreeCantUpdateID = "can't update object storage ID"
+ FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
PutSingleRedirectFailure = "failed to redirect PutSingle request"
StorageIDRetrievalFailure = "can't get storage ID from metabase"
ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
@@ -510,12 +545,4 @@ const (
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
WritecacheCantGetObject = "can't get an object from fstree"
- FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
- FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
- NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
- FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
- FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
- WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
- FailedToUpdateNetmapCandidates = "update netmap candidates failed"
- UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used"
)
diff --git a/internal/metrics/application.go b/internal/metrics/application.go
index 53acf9b7f..8bc408ab6 100644
--- a/internal/metrics/application.go
+++ b/internal/metrics/application.go
@@ -12,9 +12,8 @@ type ApplicationInfo struct {
func NewApplicationInfo(version string) *ApplicationInfo {
appInfo := &ApplicationInfo{
versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Name: "app_info",
- Help: "General information about the application.",
+ Name: "app_info",
+ Help: "General information about the application.",
}, []string{"version"}),
}
appInfo.versionValue.With(prometheus.Labels{"version": version})
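Reviewer note: dropping the `Namespace` field above changes the exported series name from a namespaced one (e.g. `frostfs_node_app_info` — the exact namespace constant is an assumption) to bare `app_info`. A sketch with the upstream client showing the effect:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// With Namespace set, the gauge is exported as <namespace>_app_info;
	// without it (as after this revert) the bare name app_info is used.
	withNS := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "frostfs_node", // assumed namespace constant
		Name:      "app_info",
		Help:      "General information about the application.",
	}, []string{"version"})
	bare := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "app_info",
		Help: "General information about the application.",
	}, []string{"version"})
	withNS.With(prometheus.Labels{"version": "dev"}).Set(1)
	bare.With(prometheus.Labels{"version": "dev"}).Set(1)
	fmt.Println("frostfs_node_app_info vs app_info")
}
```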
diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go
index 9123541ff..3aa51c0f0 100644
--- a/internal/metrics/consts.go
+++ b/internal/metrics/consts.go
@@ -22,8 +22,6 @@ const (
grpcServerSubsystem = "grpc_server"
policerSubsystem = "policer"
commonCacheSubsystem = "common_cache"
- multinetSubsystem = "multinet"
- qosSubsystem = "qos"
successLabel = "success"
shardIDLabel = "shard_id"
@@ -43,8 +41,6 @@ const (
endpointLabel = "endpoint"
hitLabel = "hit"
cacheLabel = "cache"
- sourceIPLabel = "source_ip"
- ioTagLabel = "io_tag"
readWriteMode = "READ_WRITE"
readOnlyMode = "READ_ONLY"
diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go
index d0cb8131f..f6b14a632 100644
--- a/internal/metrics/innerring.go
+++ b/internal/metrics/innerring.go
@@ -17,9 +17,7 @@ type InnerRingServiceMetrics struct {
eventDuration *prometheus.HistogramVec
morphCacheMetrics *morphCacheMetrics
logMetrics logger.LogMetrics
- multinet *multinetMetrics
- // nolint: unused
- appInfo *ApplicationInfo
+ appInfo *ApplicationInfo
}
// NewInnerRingMetrics returns new instance of metrics collectors for inner ring.
@@ -52,7 +50,6 @@ func NewInnerRingMetrics() *InnerRingServiceMetrics {
morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace),
appInfo: NewApplicationInfo(misc.Version),
logMetrics: logger.NewLogMetrics(innerRingNamespace),
- multinet: newMultinetMetrics(innerRingNamespace),
}
}
@@ -80,7 +77,3 @@ func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics {
func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics {
return m.logMetrics
}
-
-func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics {
- return m.multinet
-}
diff --git a/internal/metrics/multinet.go b/internal/metrics/multinet.go
deleted file mode 100644
index 6b1f99d46..000000000
--- a/internal/metrics/multinet.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package metrics
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type multinetMetrics struct {
- dials *prometheus.GaugeVec
-}
-
-type MultinetMetrics interface {
- Dial(sourceIP string, success bool)
-}
-
-func newMultinetMetrics(ns string) *multinetMetrics {
- return &multinetMetrics{
- dials: metrics.NewGaugeVec(
- prometheus.GaugeOpts{
- Namespace: ns,
- Subsystem: multinetSubsystem,
- Name: "dial_count",
- Help: "Dials count performed by multinet",
- }, []string{sourceIPLabel, successLabel}),
- }
-}
-
-func (m *multinetMetrics) Dial(sourceIP string, success bool) {
- m.dials.With(prometheus.Labels{
- sourceIPLabel: sourceIP,
- successLabel: strconv.FormatBool(success),
- }).Inc()
-}
diff --git a/internal/metrics/node.go b/internal/metrics/node.go
index 8ade19eb2..d9e401446 100644
--- a/internal/metrics/node.go
+++ b/internal/metrics/node.go
@@ -25,10 +25,7 @@ type NodeMetrics struct {
morphClient *morphClientMetrics
morphCache *morphCacheMetrics
log logger.LogMetrics
- multinet *multinetMetrics
- qos *QoSMetrics
- // nolint: unused
- appInfo *ApplicationInfo
+ appInfo *ApplicationInfo
}
func NewNodeMetrics() *NodeMetrics {
@@ -55,8 +52,6 @@ func NewNodeMetrics() *NodeMetrics {
morphCache: newMorphCacheMetrics(namespace),
log: logger.NewLogMetrics(namespace),
appInfo: NewApplicationInfo(misc.Version),
- multinet: newMultinetMetrics(namespace),
- qos: newQoSMetrics(),
}
}
@@ -124,11 +119,3 @@ func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics {
func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
return m.log
}
-
-func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
- return m.multinet
-}
-
-func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
- return m.qos
-}
diff --git a/internal/metrics/object.go b/internal/metrics/object.go
index e4f6dfde1..0ba994ed3 100644
--- a/internal/metrics/object.go
+++ b/internal/metrics/object.go
@@ -9,14 +9,13 @@ import (
)
type ObjectServiceMetrics interface {
- AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
+ AddRequestDuration(method string, d time.Duration, success bool)
AddPayloadSize(method string, size int)
}
type objectServiceMetrics struct {
- methodDuration *prometheus.HistogramVec
- payloadCounter *prometheus.CounterVec
- ioTagOpsCounter *prometheus.CounterVec
+ methodDuration *prometheus.HistogramVec
+ payloadCounter *prometheus.CounterVec
}
func newObjectServiceMetrics() *objectServiceMetrics {
@@ -33,24 +32,14 @@ func newObjectServiceMetrics() *objectServiceMetrics {
Name: "request_payload_bytes",
Help: "Object Service request payload",
}, []string{methodLabel}),
- ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "requests_total",
- Help: "Count of requests for each IO tag",
- }, []string{methodLabel, ioTagLabel}),
}
}
-func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
+func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
m.methodDuration.With(prometheus.Labels{
methodLabel: method,
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
- m.ioTagOpsCounter.With(prometheus.Labels{
- ioTagLabel: ioTag,
- methodLabel: method,
- }).Inc()
}
func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {
diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go
deleted file mode 100644
index be6878142..000000000
--- a/internal/metrics/qos.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package metrics
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type QoSMetrics struct {
- opsCounter *prometheus.GaugeVec
-}
-
-func newQoSMetrics() *QoSMetrics {
- return &QoSMetrics{
- opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: qosSubsystem,
- Name: "operations_total",
- Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
- }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
- }
-}
-
-func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "pending",
- }).Set(float64(pending))
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "in_progress",
- }).Set(float64(inProgress))
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "completed",
- }).Set(float64(completed))
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "resource_exhausted",
- }).Set(float64(resourceExhausted))
-}
-
-func (m *QoSMetrics) Close(shardID string) {
- m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
-}
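For reference, the `Close` method in the deleted file above relied on `DeletePartialMatch` to drop every per-shard series at once. A minimal standalone sketch of that cleanup pattern:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Same label set as the removed QoS gauge.
	ops := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "operations_total",
		Help: "per-shard operation states",
	}, []string{"shard_id", "operation", "io_tag", "type"})

	ops.With(prometheus.Labels{"shard_id": "s1", "operation": "get", "io_tag": "client", "type": "pending"}).Set(3)
	ops.With(prometheus.Labels{"shard_id": "s2", "operation": "get", "io_tag": "client", "type": "pending"}).Set(1)

	// DeletePartialMatch removes every series whose labels include
	// shard_id="s1", regardless of the other label values.
	removed := ops.DeletePartialMatch(prometheus.Labels{"shard_id": "s1"})
	fmt.Println("series removed for shard s1:", removed)
}
```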
diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go
index e192c4398..6702aa83c 100644
--- a/internal/metrics/treeservice.go
+++ b/internal/metrics/treeservice.go
@@ -12,14 +12,12 @@ type TreeMetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
- AddOperation(string, string)
}
type treeServiceMetrics struct {
replicateTaskDuration *prometheus.HistogramVec
replicateWaitDuration *prometheus.HistogramVec
syncOpDuration *prometheus.HistogramVec
- ioTagOpsCounter *prometheus.CounterVec
}
var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
@@ -44,12 +42,6 @@ func newTreeServiceMetrics() *treeServiceMetrics {
Name: "sync_duration_seconds",
Help: "Duration of synchronization operations",
}, []string{successLabel}),
- ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: treeServiceSubsystem,
- Name: "requests_total",
- Help: "Count of requests for each IO tag",
- }, []string{methodLabel, ioTagLabel}),
}
}
@@ -70,10 +62,3 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
}
-
-func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
- m.ioTagOpsCounter.With(prometheus.Labels{
- ioTagLabel: ioTag,
- methodLabel: op,
- }).Inc()
-}
diff --git a/internal/net/config.go b/internal/net/config.go
deleted file mode 100644
index b84ac3b35..000000000
--- a/internal/net/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package net
-
-import (
- "errors"
- "fmt"
- "net/netip"
- "slices"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- "git.frostfs.info/TrueCloudLab/multinet"
-)
-
-var errEmptySourceIPList = errors.New("empty source IP list")
-
-type Subnet struct {
- Prefix string
- SourceIPs []string
-}
-
-type Config struct {
- Enabled bool
- Subnets []Subnet
- Balancer string
- Restrict bool
- FallbackDelay time.Duration
- Metrics metrics.MultinetMetrics
-}
-
-func (c Config) toMultinetConfig() (multinet.Config, error) {
- var subnets []multinet.Subnet
- for _, s := range c.Subnets {
- var ms multinet.Subnet
- p, err := netip.ParsePrefix(s.Prefix)
- if err != nil {
- return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
- }
- ms.Prefix = p
- for _, ip := range s.SourceIPs {
- addr, err := netip.ParseAddr(ip)
- if err != nil {
- return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
- }
- ms.SourceIPs = append(ms.SourceIPs, addr)
- }
- if len(ms.SourceIPs) == 0 {
- return multinet.Config{}, errEmptySourceIPList
- }
- subnets = append(subnets, ms)
- }
- return multinet.Config{
- Subnets: subnets,
- Balancer: multinet.BalancerType(c.Balancer),
- Restrict: c.Restrict,
- FallbackDelay: c.FallbackDelay,
- Dialer: newDefaulDialer(),
- EventHandler: newEventHandler(c.Metrics),
- }, nil
-}
-
-func (c Config) equals(other Config) bool {
- return c.Enabled == other.Enabled &&
- slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
- return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
- }) &&
- c.Balancer == other.Balancer &&
- c.Restrict == other.Restrict &&
- c.FallbackDelay == other.FallbackDelay
-}
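The removed `toMultinetConfig` above is mostly validation: every prefix and source IP must parse, and a subnet with an empty source IP list is rejected. A standalone sketch of that parsing with `net/netip`:

```go
package main

import (
	"errors"
	"fmt"
	"net/netip"
)

// validateSubnet checks one subnet the way the removed code did.
func validateSubnet(prefix string, sourceIPs []string) error {
	if _, err := netip.ParsePrefix(prefix); err != nil {
		return fmt.Errorf("parse IP prefix '%s': %w", prefix, err)
	}
	if len(sourceIPs) == 0 {
		return errors.New("empty source IP list")
	}
	for _, ip := range sourceIPs {
		if _, err := netip.ParseAddr(ip); err != nil {
			return fmt.Errorf("parse IP address '%s': %w", ip, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateSubnet("10.0.0.0/24", []string{"10.0.0.5"})) // <nil>
	fmt.Println(validateSubnet("10.0.0.0/24", nil))                  // empty source IP list
}
```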
diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go
deleted file mode 100644
index 6265f1860..000000000
--- a/internal/net/dial_target.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
-
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package net
-
-import (
- "net/url"
- "strings"
-)
-
-// parseDialTarget returns the network and address to pass to dialer.
-func parseDialTarget(target string) (string, string) {
- net := "tcp"
- m1 := strings.Index(target, ":")
- m2 := strings.Index(target, ":/")
- // handle unix:addr which will fail with url.Parse
- if m1 >= 0 && m2 < 0 {
- if n := target[0:m1]; n == "unix" {
- return n, target[m1+1:]
- }
- }
- if m2 >= 0 {
- t, err := url.Parse(target)
- if err != nil {
- return net, target
- }
- scheme := t.Scheme
- addr := t.Path
- if scheme == "unix" {
- if addr == "" {
- addr = t.Host
- }
- return scheme, addr
- }
- }
- return net, target
-}
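The removed helper above mirrors gRPC's own target parsing; a compact, runnable demonstration of the cases it handled (behavior matches the deleted code, condensed):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseDialTarget returns the network and address to pass to a dialer,
// condensed from the file deleted above.
func parseDialTarget(target string) (string, string) {
	network := "tcp"
	m1 := strings.Index(target, ":")
	m2 := strings.Index(target, ":/")
	// handle unix:addr which would fail with url.Parse
	if m1 >= 0 && m2 < 0 {
		if n := target[0:m1]; n == "unix" {
			return n, target[m1+1:]
		}
	}
	if m2 >= 0 {
		if t, err := url.Parse(target); err == nil && t.Scheme == "unix" {
			addr := t.Path
			if addr == "" {
				addr = t.Host
			}
			return t.Scheme, addr
		}
	}
	return network, target
}

func main() {
	for _, tgt := range []string{"unix:/tmp/x.sock", "unix:relative.sock", "127.0.0.1:8080"} {
		n, a := parseDialTarget(tgt)
		fmt.Println(tgt, "->", n, a)
	}
}
```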
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
deleted file mode 100644
index daf0f815f..000000000
--- a/internal/net/dialer.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package net
-
-import (
- "context"
- "net"
- "syscall"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-type Dialer interface {
- DialContext(ctx context.Context, network, address string) (net.Conn, error)
-}
-
-func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
- return d.DialContext(ctx, "tcp", address)
-}
-
-func newDefaultDialer() net.Dialer {
- // From `grpc.WithContextDialer` comment:
- //
- // Note: All supported releases of Go (as of December 2023) override the OS
- // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
- // with OS defaults for keepalive time and interval, use a net.Dialer that sets
- // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
- // option to true from the Control field. For a concrete example of how to do
- // this, see internal.NetDialerWithTCPKeepalive().
- //
- // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
- return net.Dialer{
- KeepAlive: time.Duration(-1),
- Control: func(_, _ string, c syscall.RawConn) error {
- return c.Control(func(fd uintptr) {
- _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
- })
- },
- }
-}
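
Because (*net.Dialer).DialContext has a pointer receiver, the value returned by newDefaultDialer satisfies the Dialer interface through its address; a usage sketch (imports elided, endpoint illustrative):

    func exampleDial(ctx context.Context) (net.Conn, error) {
        d := newDefaultDialer()
        // &d satisfies Dialer via (*net.Dialer).DialContext
        return DialContextTCP(ctx, "203.0.113.10:8080", &d)
    }
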
diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go
deleted file mode 100644
index 3d94dedc7..000000000
--- a/internal/net/dialer_source.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package net
-
-import (
- "context"
- "net"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/multinet"
-)
-
-type DialerSource struct {
- guard sync.RWMutex
-
- c Config
-
- md multinet.Dialer
-}
-
-func NewDialerSource(c Config) (*DialerSource, error) {
- result := &DialerSource{}
- if err := result.build(c); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *DialerSource) build(c Config) error {
- if c.Enabled {
- mc, err := c.toMultinetConfig()
- if err != nil {
- return err
- }
- md, err := multinet.NewDialer(mc)
- if err != nil {
- return err
- }
- s.md = md
- s.c = c
- return nil
- }
- s.md = nil
- s.c = c
- return nil
-}
-
-// GrpcContextDialer returns a dialer function suitable for grpc.WithContextDialer.
-// Returns nil if multinet is disabled.
-func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
- s.guard.RLock()
- defer s.guard.RUnlock()
-
- if s.c.Enabled {
- return func(ctx context.Context, address string) (net.Conn, error) {
- network, address := parseDialTarget(address)
- return s.md.DialContext(ctx, network, address)
- }
- }
- return nil
-}
-
-// NetContextDialer returns a dial function with the net.DialContext signature.
-// Returns nil if multinet is disabled.
-func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
- s.guard.RLock()
- defer s.guard.RUnlock()
-
- if s.c.Enabled {
- return func(ctx context.Context, network, address string) (net.Conn, error) {
- return s.md.DialContext(ctx, network, address)
- }
- }
- return nil
-}
-
-func (s *DialerSource) Update(c Config) error {
- s.guard.Lock()
- defer s.guard.Unlock()
-
- if s.c.equals(c) {
- return nil
- }
- return s.build(c)
-}
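
A sketch of wiring DialerSource into a gRPC client (imports elided; the endpoint is illustrative, and the context dialer is only attached when multinet is enabled):

    func exampleGRPCConn(cfg Config) (*grpc.ClientConn, error) {
        src, err := NewDialerSource(cfg)
        if err != nil {
            return nil, err
        }
        opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
        if dial := src.GrpcContextDialer(); dial != nil { // nil means multinet is disabled
            opts = append(opts, grpc.WithContextDialer(dial))
        }
        return grpc.NewClient("10.0.0.7:8080", opts...)
    }
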
diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go
deleted file mode 100644
index 024e5cf7c..000000000
--- a/internal/net/event_handler.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package net
-
-import (
- "net"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- "git.frostfs.info/TrueCloudLab/multinet"
-)
-
-var _ multinet.EventHandler = (*metricsEventHandler)(nil)
-
-type metricsEventHandler struct {
- m metrics.MultinetMetrics
-}
-
-func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) {
- sourceIPString := "undefined"
- if sourceIP != nil {
- sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
- }
- m.m.Dial(sourceIPString, err == nil)
-}
-
-func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler {
- if m == nil {
- return nil
- }
- return &metricsEventHandler{m: m}
-}
diff --git a/internal/qos/config.go b/internal/qos/config.go
deleted file mode 100644
index d90b403b5..000000000
--- a/internal/qos/config.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package qos
-
-import (
- "math"
- "time"
-)
-
-const (
- NoLimit int64 = math.MaxInt64
- DefaultIdleTimeout = 5 * time.Minute
-)
-
-type LimiterConfig struct {
- Read OpConfig
- Write OpConfig
-}
-
-type OpConfig struct {
- MaxWaitingOps int64
- MaxRunningOps int64
- IdleTimeout time.Duration
- Tags []IOTagConfig
-}
-
-type IOTagConfig struct {
- Tag string
- Weight *float64
- LimitOps *float64
- ReservedOps *float64
- Prohibited bool
-}
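
A sketch of a LimiterConfig (values illustrative): a read section with a per-tag ops cap, and a write section with no tags and an unbounded waiting queue, which degrades to a plain semaphore scheduler (see createScheduler in limiter.go further down):

    limit := 1000.0 // ops limit for the "client" tag
    cfg := LimiterConfig{
        Read: OpConfig{
            MaxRunningOps: 128,
            MaxWaitingOps: 512,
            IdleTimeout:   DefaultIdleTimeout,
            Tags:          []IOTagConfig{{Tag: "client", LimitOps: &limit}},
        },
        Write: OpConfig{
            MaxRunningOps: 64,
            MaxWaitingOps: NoLimit, // with no tags, this selects the semaphore scheduler
            IdleTimeout:   DefaultIdleTimeout,
        },
    }
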
diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go
deleted file mode 100644
index 58cd9e52c..000000000
--- a/internal/qos/grpc.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package qos
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "google.golang.org/grpc"
-)
-
-func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
- ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String())
- return handler(ctx, req)
- }
-}
-
-func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor {
- return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- rawTag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return invoker(ctx, method, req, reply, cc, opts...)
- }
- tag, err := FromRawString(rawTag)
- if err != nil {
- tag = IOTagClient
- }
- if tag.IsLocal() {
- tag = IOTagInternal
- }
- ctx = tagging.ContextWithIOTag(ctx, tag.String())
- return invoker(ctx, method, req, reply, cc, opts...)
- }
-}
-
-func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor {
- return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- rawTag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return streamer(ctx, desc, cc, method, opts...)
- }
- tag, err := FromRawString(rawTag)
- if err != nil {
- tag = IOTagClient
- }
- if tag.IsLocal() {
- tag = IOTagInternal
- }
- ctx = tagging.ContextWithIOTag(ctx, tag.String())
- return streamer(ctx, desc, cc, method, opts...)
- }
-}
-
-func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
- if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() {
- return handler(ctx, req)
- }
-
- release, ok := getLimiter().Acquire(info.FullMethod)
- if !ok {
- return nil, new(apistatus.ResourceExhausted)
- }
- defer release()
-
- return handler(ctx, req)
- }
-}
-
-//nolint:contextcheck (grpc.ServerStream manages the context itself)
-func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor {
- return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
- if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() {
- return handler(srv, ss)
- }
-
- release, ok := getLimiter().Acquire(info.FullMethod)
- if !ok {
- return new(apistatus.ResourceExhausted)
- }
- defer release()
-
- return handler(srv, ss)
- }
-}
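
A sketch of attaching these interceptors on both ends (imports elided; construction of the frostfs-qos limiting.Limiter itself is out of scope here):

    func exampleWire(lim limiting.Limiter) (*grpc.Server, []grpc.DialOption) {
        srv := grpc.NewServer(
            grpc.ChainUnaryInterceptor(NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })),
            grpc.ChainStreamInterceptor(NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })),
        )
        clientOpts := []grpc.DialOption{
            grpc.WithUnaryInterceptor(NewAdjustOutgoingIOTagUnaryClientInterceptor()),
            grpc.WithStreamInterceptor(NewAdjustOutgoingIOTagStreamClientInterceptor()),
        }
        return srv, clientOpts
    }
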
diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go
deleted file mode 100644
index 7d0826754..000000000
--- a/internal/qos/grpc_test.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package qos_test
-
-import (
- "context"
- "errors"
- "fmt"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "github.com/stretchr/testify/require"
- "google.golang.org/grpc"
-)
-
-const (
- okKey = "ok"
-)
-
-var (
- errTest = errors.New("mock")
- errWrongTag = errors.New("wrong tag")
- errNoTag = errors.New("failed to get tag from context")
- errResExhausted *apistatus.ResourceExhausted
- tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
-)
-
-type mockGRPCServerStream struct {
- grpc.ServerStream
-
- ctx context.Context
-}
-
-func (m *mockGRPCServerStream) Context() context.Context {
- return m.ctx
-}
-
-type limiter struct {
- acquired bool
- released bool
-}
-
-func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
- l.acquired = true
- if key != okKey {
- return nil, false
- }
- return func() { l.released = true }, true
-}
-
-func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
- interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
- handler := func(ctx context.Context, req any) (any, error) {
- return nil, errTest
- }
- _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
- return err
-}
-
-func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
- interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
- handler := func(srv any, stream grpc.ServerStream) error {
- return errTest
- }
- err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
- FullMethod: methodName,
- }, handler)
- return err
-}
-
-func Test_MaxActiveRPCLimiter(t *testing.T) {
- // UnaryServerInterceptor
- t.Run("unary fail", func(t *testing.T) {
- var lim limiter
-
- err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
- require.ErrorAs(t, err, &errResExhausted)
- require.True(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("unary pass critical", func(t *testing.T) {
- var lim limiter
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
-
- err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
- require.ErrorIs(t, err, errTest)
- require.False(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("unary pass", func(t *testing.T) {
- var lim limiter
-
- err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
- require.ErrorIs(t, err, errTest)
- require.True(t, lim.acquired)
- require.True(t, lim.released)
- })
- // StreamServerInterceptor
- t.Run("stream fail", func(t *testing.T) {
- var lim limiter
-
- err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
- require.ErrorAs(t, err, &errResExhausted)
- require.True(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("stream pass critical", func(t *testing.T) {
- var lim limiter
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
-
- err := streamMaxActiveRPCLimiter(ctx, &lim, "")
- require.ErrorIs(t, err, errTest)
- require.False(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("stream pass", func(t *testing.T) {
- var lim limiter
-
- err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
- require.ErrorIs(t, err, errTest)
- require.True(t, lim.acquired)
- require.True(t, lim.released)
- })
-}
-
-func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
- interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
- called := false
- handler := func(ctx context.Context, req any) (any, error) {
- called = true
- if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
- return nil, nil
- }
- return nil, errWrongTag
- }
- _, err := interceptor(context.Background(), nil, nil, handler)
- require.NoError(t, err)
- require.True(t, called)
-}
-
-func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
- interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()
-
- // check context with no value
- called := false
- invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
- called = true
- if _, ok := tagging.IOTagFromContext(ctx); ok {
- return fmt.Errorf("%v: expected no IO tags", errWrongTag)
- }
- return nil
- }
- require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
- require.True(t, called)
-
- // check context for internal tag
- targetTag := qos.IOTagInternal.String()
- invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
- raw, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return errNoTag
- }
- if raw != targetTag {
- return errWrongTag
- }
- return nil
- }
- for _, tag := range tags {
- ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
- require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
- }
-
- // check context for client tag
- ctx := tagging.ContextWithIOTag(context.Background(), "")
- targetTag = qos.IOTagClient.String()
- require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
-}
-
-func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
- interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()
-
- // check context with no value
- called := false
- streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- called = true
- if _, ok := tagging.IOTagFromContext(ctx); ok {
- return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
- }
- return nil, nil
- }
- _, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
- require.True(t, called)
- require.NoError(t, err)
-
- // check context for internal tag
- targetTag := qos.IOTagInternal.String()
- streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- raw, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return nil, errNoTag
- }
- if raw != targetTag {
- return nil, errWrongTag
- }
- return nil, nil
- }
- for _, tag := range tags {
- ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
- _, err := interceptor(ctx, nil, nil, "", streamer, nil)
- require.NoError(t, err)
- }
-
- // check context for client tag
- ctx := tagging.ContextWithIOTag(context.Background(), "")
- targetTag = qos.IOTagClient.String()
- _, err = interceptor(ctx, nil, nil, "", streamer, nil)
- require.NoError(t, err)
-}
diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go
deleted file mode 100644
index 2d7de32fc..000000000
--- a/internal/qos/limiter.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package qos
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-const (
- defaultIdleTimeout time.Duration = 0
- defaultShare float64 = 1.0
- minusOne = ^uint64(0)
-
- defaultMetricsCollectTimeout = 5 * time.Second
-)
-
-type ReleaseFunc scheduling.ReleaseFunc
-
-type Limiter interface {
- ReadRequest(context.Context) (ReleaseFunc, error)
- WriteRequest(context.Context) (ReleaseFunc, error)
- SetParentID(string)
- SetMetrics(Metrics)
- Close()
-}
-
-type scheduler interface {
- RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
- Close()
-}
-
-func NewLimiter(c LimiterConfig) (Limiter, error) {
- if err := c.Validate(); err != nil {
- return nil, err
- }
- readScheduler, err := createScheduler(c.Read)
- if err != nil {
- return nil, fmt.Errorf("create read scheduler: %w", err)
- }
- writeScheduler, err := createScheduler(c.Write)
- if err != nil {
- return nil, fmt.Errorf("create write scheduler: %w", err)
- }
- l := &mClockLimiter{
- readScheduler: readScheduler,
- writeScheduler: writeScheduler,
- closeCh: make(chan struct{}),
- wg: &sync.WaitGroup{},
- readStats: createStats(),
- writeStats: createStats(),
- }
- l.shardID.Store(&shardID{})
- l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
- l.startMetricsCollect()
- return l, nil
-}
-
-func createScheduler(config OpConfig) (scheduler, error) {
- if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit {
- return newSemaphoreScheduler(config.MaxRunningOps), nil
- }
- return scheduling.NewMClock(
- uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
- convertToSchedulingTags(config.Tags), config.IdleTimeout)
-}
-
-func convertToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo {
- result := make(map[string]scheduling.TagInfo)
- for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
- result[tag.String()] = scheduling.TagInfo{
- Share: defaultShare,
- }
- }
- for _, l := range limits {
- v := result[l.Tag]
- if l.Weight != nil && *l.Weight != 0 {
- v.Share = *l.Weight
- }
- if l.LimitOps != nil && *l.LimitOps != 0 {
- v.LimitIOPS = l.LimitOps
- }
- if l.ReservedOps != nil && *l.ReservedOps != 0 {
- v.ReservedIOPS = l.ReservedOps
- }
- v.Prohibited = l.Prohibited
- result[l.Tag] = v
- }
- return result
-}
-
-var (
- _ Limiter = (*noopLimiter)(nil)
- releaseStub ReleaseFunc = func() {}
- noopLimiterInstance = &noopLimiter{}
-)
-
-func NewNoopLimiter() Limiter {
- return noopLimiterInstance
-}
-
-type noopLimiter struct{}
-
-func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
- return releaseStub, nil
-}
-
-func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
- return releaseStub, nil
-}
-
-func (n *noopLimiter) SetParentID(string) {}
-
-func (n *noopLimiter) Close() {}
-
-func (n *noopLimiter) SetMetrics(Metrics) {}
-
-var _ Limiter = (*mClockLimiter)(nil)
-
-type shardID struct {
- id string
-}
-
-type mClockLimiter struct {
- readScheduler scheduler
- writeScheduler scheduler
-
- readStats map[string]*stat
- writeStats map[string]*stat
-
- shardID atomic.Pointer[shardID]
- metrics atomic.Pointer[metricsHolder]
- closeCh chan struct{}
- wg *sync.WaitGroup
-}
-
-func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
- return requestArrival(ctx, n.readScheduler, n.readStats)
-}
-
-func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
- return requestArrival(ctx, n.writeScheduler, n.writeStats)
-}
-
-func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
- tag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- tag = IOTagClient.String()
- }
- stat := getStat(tag, stats)
- stat.pending.Add(1)
- if tag == IOTagCritical.String() {
- stat.inProgress.Add(1)
- return func() {
- stat.completed.Add(1)
- }, nil
- }
- rel, err := s.RequestArrival(ctx, tag)
- stat.inProgress.Add(1)
- if err != nil {
- if isResourceExhaustedErr(err) {
- stat.resourceExhausted.Add(1)
- return nil, &apistatus.ResourceExhausted{}
- }
- stat.completed.Add(1)
- return nil, err
- }
- return func() {
- rel()
- stat.completed.Add(1)
- }, nil
-}
-
-func (n *mClockLimiter) Close() {
- n.readScheduler.Close()
- n.writeScheduler.Close()
- close(n.closeCh)
- n.wg.Wait()
- n.metrics.Load().metrics.Close(n.shardID.Load().id)
-}
-
-func (n *mClockLimiter) SetParentID(parentID string) {
- n.shardID.Store(&shardID{id: parentID})
-}
-
-func (n *mClockLimiter) SetMetrics(m Metrics) {
- n.metrics.Store(&metricsHolder{metrics: m})
-}
-
-func (n *mClockLimiter) startMetricsCollect() {
- n.wg.Add(1)
- go func() {
- defer n.wg.Done()
-
- ticker := time.NewTicker(defaultMetricsCollectTimeout)
- defer ticker.Stop()
- for {
- select {
- case <-n.closeCh:
- return
- case <-ticker.C:
- shardID := n.shardID.Load().id
- if shardID == "" {
- continue
- }
- metrics := n.metrics.Load().metrics
- exportMetrics(metrics, n.readStats, shardID, "read")
- exportMetrics(metrics, n.writeStats, shardID, "write")
- }
- }
- }()
-}
-
-func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
- var pending uint64
- var inProgress uint64
- var completed uint64
- var resExh uint64
- for tag, s := range stats {
- pending = s.pending.Load()
- inProgress = s.inProgress.Load()
- completed = s.completed.Load()
- resExh = s.resourceExhausted.Load()
- if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
- continue
- }
- metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
- }
-}
-
-func isResourceExhaustedErr(err error) bool {
- return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
- errors.Is(err, errSemaphoreLimitExceeded) ||
- errors.Is(err, scheduling.ErrTagRequestsProhibited)
-}
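
Typical call flow for the limiter above (sketch; the context is expected to carry an IO tag via frostfs-qos tagging, and untagged requests fall back to the client tag):

    func exampleRead(ctx context.Context, cfg LimiterConfig) error {
        lim, err := NewLimiter(cfg)
        if err != nil {
            return err
        }
        defer lim.Close()

        release, err := lim.ReadRequest(ctx)
        if err != nil {
            return err // *apistatus.ResourceExhausted when the scheduler refuses the op
        }
        defer release()
        // ... perform the read under the acquired slot ...
        return nil
    }
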
diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go
deleted file mode 100644
index c00da51b7..000000000
--- a/internal/qos/metrics.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package qos
-
-import "sync/atomic"
-
-type Metrics interface {
- SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
- Close(shardID string)
-}
-
-var _ Metrics = (*noopMetrics)(nil)
-
-type noopMetrics struct{}
-
-func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
-}
-
-func (n *noopMetrics) Close(string) {}
-
-// stat represents the limiter's cumulative statistics counters.
-//
-// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
-type stat struct {
- completed atomic.Uint64
- pending atomic.Uint64
- resourceExhausted atomic.Uint64
- inProgress atomic.Uint64
-}
-
-type metricsHolder struct {
- metrics Metrics
-}
diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go
deleted file mode 100644
index 74e6928f3..000000000
--- a/internal/qos/semaphore.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package qos
-
-import (
- "context"
- "errors"
-
- qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
-)
-
-var (
- _ scheduler = (*semaphore)(nil)
- errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded")
-)
-
-type semaphore struct {
- s *qosSemaphore.Semaphore
-}
-
-func newSemaphoreScheduler(size int64) *semaphore {
- return &semaphore{
- s: qosSemaphore.NewSemaphore(size),
- }
-}
-
-func (s *semaphore) Close() {}
-
-func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- if s.s.Acquire() {
- return s.s.Release, nil
- }
- return nil, errSemaphoreLimitExceeded
-}
diff --git a/internal/qos/stats.go b/internal/qos/stats.go
deleted file mode 100644
index 3ecfad9f9..000000000
--- a/internal/qos/stats.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package qos
-
-const unknownStatsTag = "unknown"
-
-var statTags = map[string]struct{}{
- IOTagBackground.String(): {},
- IOTagClient.String(): {},
- IOTagCritical.String(): {},
- IOTagInternal.String(): {},
- IOTagPolicer.String(): {},
- IOTagTreeSync.String(): {},
- IOTagWritecache.String(): {},
- unknownStatsTag: {},
-}
-
-func createStats() map[string]*stat {
- result := make(map[string]*stat)
- for tag := range statTags {
- result[tag] = &stat{}
- }
- return result
-}
-
-func getStat(tag string, stats map[string]*stat) *stat {
- if v, ok := stats[tag]; ok {
- return v
- }
- return stats[unknownStatsTag]
-}
diff --git a/internal/qos/tags.go b/internal/qos/tags.go
deleted file mode 100644
index e3f7cafd6..000000000
--- a/internal/qos/tags.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package qos
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
-)
-
-type IOTag string
-
-const (
- IOTagBackground IOTag = "background"
- IOTagClient IOTag = "client"
- IOTagCritical IOTag = "critical"
- IOTagInternal IOTag = "internal"
- IOTagPolicer IOTag = "policer"
- IOTagTreeSync IOTag = "treesync"
- IOTagWritecache IOTag = "writecache"
-
- ioTagUnknown IOTag = ""
-)
-
-func FromRawString(s string) (IOTag, error) {
- switch s {
- case string(IOTagBackground):
- return IOTagBackground, nil
- case string(IOTagClient):
- return IOTagClient, nil
- case string(IOTagCritical):
- return IOTagCritical, nil
- case string(IOTagInternal):
- return IOTagInternal, nil
- case string(IOTagPolicer):
- return IOTagPolicer, nil
- case string(IOTagTreeSync):
- return IOTagTreeSync, nil
- case string(IOTagWritecache):
- return IOTagWritecache, nil
- default:
- return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
- }
-}
-
-func (t IOTag) String() string {
- return string(t)
-}
-
-func IOTagFromContext(ctx context.Context) string {
- tag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- tag = "undefined"
- }
- return tag
-}
-
-func (t IOTag) IsLocal() bool {
- return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
-}
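
The outgoing-tag adjustment performed by the client interceptors in grpc.go above reduces to this sketch (raw and ctx assumed in scope):

    tag, err := FromRawString(raw)
    if err != nil {
        tag = IOTagClient // unknown tags are downgraded to client
    }
    if tag.IsLocal() {
        tag = IOTagInternal // node-local tags travel as internal
    }
    ctx = tagging.ContextWithIOTag(ctx, tag.String())
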
diff --git a/internal/qos/validate.go b/internal/qos/validate.go
deleted file mode 100644
index 70f1f24e8..000000000
--- a/internal/qos/validate.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package qos
-
-import (
- "errors"
- "fmt"
- "math"
-)
-
-var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
-
-type tagConfig struct {
- Shares, Limit, Reserved *float64
-}
-
-func (c *LimiterConfig) Validate() error {
- if err := validateOpConfig(c.Read); err != nil {
- return fmt.Errorf("limits 'read' section validation error: %w", err)
- }
- if err := validateOpConfig(c.Write); err != nil {
- return fmt.Errorf("limits 'write' section validation error: %w", err)
- }
- return nil
-}
-
-func validateOpConfig(c OpConfig) error {
- if c.MaxRunningOps <= 0 {
- return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
- }
- if c.MaxWaitingOps <= 0 {
- return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps)
- }
- if c.IdleTimeout <= 0 {
- return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String())
- }
- if err := validateTags(c.Tags); err != nil {
- return fmt.Errorf("'tags' config section validation error: %w", err)
- }
- return nil
-}
-
-func validateTags(configTags []IOTagConfig) error {
- tags := map[IOTag]tagConfig{
- IOTagBackground: {},
- IOTagClient: {},
- IOTagInternal: {},
- IOTagPolicer: {},
- IOTagTreeSync: {},
- IOTagWritecache: {},
- }
- for _, t := range configTags {
- tag, err := FromRawString(t.Tag)
- if err != nil {
- return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
- }
- if _, ok := tags[tag]; !ok {
- return fmt.Errorf("tag %s is not configurable", t.Tag)
- }
- tags[tag] = tagConfig{
- Shares: t.Weight,
- Limit: t.LimitOps,
- Reserved: t.ReservedOps,
- }
- }
- idx := 0
- var shares float64
- for t, v := range tags {
- if idx == 0 {
- idx++
- shares = float64Value(v.Shares)
- } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
- return errWeightsMustBeSpecified
- }
- if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
- return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String())
- }
- if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
- return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String())
- }
- if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
- return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String())
- }
- }
- return nil
-}
-
-func float64Value(f *float64) float64 {
- if f == nil {
- return 0.0
- }
- return *f
-}
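
A failing validation example (sketch): a zero max_running_ops in the read section is rejected before any scheduler is built:

    bad := LimiterConfig{
        Read:  OpConfig{MaxRunningOps: 0, MaxWaitingOps: 10, IdleTimeout: time.Minute},
        Write: OpConfig{MaxRunningOps: 8, MaxWaitingOps: 10, IdleTimeout: time.Minute},
    }
    err := bad.Validate()
    // err: limits 'read' section validation error: invalid 'max_running_ops = 0': must be greater than zero
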
diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go
index 590b7a885..e547701fb 100644
--- a/pkg/ape/chainbase/option.go
+++ b/pkg/ape/chainbase/option.go
@@ -5,7 +5,9 @@ import (
"os"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.etcd.io/bbolt"
+ "go.uber.org/zap"
)
type Option func(*cfg)
@@ -16,6 +18,7 @@ type cfg struct {
noSync bool
maxBatchDelay time.Duration
maxBatchSize int
+ log *logger.Logger
}
func defaultCfg() *cfg {
@@ -23,6 +26,7 @@ func defaultCfg() *cfg {
perm: os.ModePerm,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
maxBatchSize: bbolt.DefaultMaxBatchSize,
+ log: &logger.Logger{Logger: zap.L()},
}
}
@@ -55,3 +59,9 @@ func WithMaxBatchSize(maxBatchSize int) Option {
c.maxBatchSize = maxBatchSize
}
}
+
+func WithLogger(l *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = l
+ }
+}
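
The added WithLogger follows the file's functional-options pattern; a constructor would apply options as sketched below (newCfgWith is a hypothetical helper shown only to illustrate the pattern):

    func newCfgWith(opts ...Option) *cfg {
        c := defaultCfg()
        for _, o := range opts {
            o(c)
        }
        return c
    }

    // e.g.: c := newCfgWith(WithMaxBatchSize(1024), WithLogger(log))
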
diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go
index 8cbb1cce9..953b91a79 100644
--- a/pkg/ape/contract_storage/proxy.go
+++ b/pkg/ape/contract_storage/proxy.go
@@ -31,7 +31,9 @@ type RPCActorProvider interface {
type ProxyVerificationContractStorage struct {
rpcActorProvider RPCActorProvider
- cosigners []actor.SignerAccount
+ acc *wallet.Account
+
+ proxyScriptHash util.Uint160
policyScriptHash util.Uint160
}
@@ -39,27 +41,12 @@ type ProxyVerificationContractStorage struct {
var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
- acc := wallet.NewAccountFromPrivateKey(key)
return &ProxyVerificationContractStorage{
rpcActorProvider: rpcActorProvider,
- cosigners: []actor.SignerAccount{
- {
- Signer: transaction.Signer{
- Account: proxyScriptHash,
- Scopes: transaction.CustomContracts,
- AllowedContracts: []util.Uint160{policyScriptHash},
- },
- Account: notary.FakeContractAccount(proxyScriptHash),
- },
- {
- Signer: transaction.Signer{
- Account: acc.Contract.ScriptHash(),
- Scopes: transaction.CalledByEntry,
- },
- Account: acc,
- },
- },
+ acc: wallet.NewAccountFromPrivateKey(key),
+
+ proxyScriptHash: proxyScriptHash,
policyScriptHash: policyScriptHash,
}
@@ -77,7 +64,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
- act, err := actor.New(rpcActor, contractStorage.cosigners)
+ act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash))
if err != nil {
return nil, err
}
@@ -111,16 +98,31 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na
// ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as cosigners.
func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
- rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
- inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
- return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
+ // The contract storage actor is reconstructed on every method invocation because the RPCActor's
+ // (essentially the WSClient's) connection may become invalid, and ProxyVerificationContractStorage does not manage reconnections.
+ contractStorageActor, err := contractStorage.newContractStorageActor()
+ if err != nil {
+ return nil, err
+ }
+ return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
}
-type invokerAdapter struct {
- *invoker.Invoker
- rpcInvoker invoker.RPCInvoke
-}
-
-func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
- return n.rpcInvoker
+func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount {
+ return []actor.SignerAccount{
+ {
+ Signer: transaction.Signer{
+ Account: proxyScriptHash,
+ Scopes: transaction.CustomContracts,
+ AllowedContracts: []util.Uint160{policyScriptHash},
+ },
+ Account: notary.FakeContractAccount(proxyScriptHash),
+ },
+ {
+ Signer: transaction.Signer{
+ Account: acc.Contract.ScriptHash(),
+ Scopes: transaction.CalledByEntry,
+ },
+ Account: acc,
+ },
+ }
}
diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go
index d32bd4a07..c0413678d 100644
--- a/pkg/ape/request/frostfsid.go
+++ b/pkg/ape/request/frostfsid.go
@@ -1,7 +1,6 @@
package request
import (
- "context"
"fmt"
"strconv"
"strings"
@@ -13,9 +12,9 @@ import (
)
// FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID.
-func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
+func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
reqProps := make(map[string]string)
- subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
+ subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err)
@@ -37,8 +36,8 @@ func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfs
}
// Groups return the actor's group ids from frostfsid contract.
-func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
- subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
+func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
+ subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err)
diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go
index 98bdf99e7..854fbc49f 100644
--- a/pkg/core/client/client.go
+++ b/pkg/core/client/client.go
@@ -3,8 +3,8 @@ package client
import (
"context"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go
index 91ee5c6c3..d4bc0cf68 100644
--- a/pkg/core/client/util.go
+++ b/pkg/core/client/util.go
@@ -3,7 +3,6 @@ package client
import (
"bytes"
"fmt"
- "iter"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -20,7 +19,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro
// Args must not be nil.
func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface {
PublicKey() []byte
- Addresses() iter.Seq[string]
+ IterateAddresses(func(string) bool)
NumberOfAddresses() int
ExternalAddresses() []string
},
diff --git a/pkg/core/container/delete.go b/pkg/core/container/delete.go
index 8c14bdf5e..8e0aaebb9 100644
--- a/pkg/core/container/delete.go
+++ b/pkg/core/container/delete.go
@@ -1,7 +1,7 @@
package container
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go
deleted file mode 100644
index 1c52d93e7..000000000
--- a/pkg/core/container/info.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package container
-
-import (
- "context"
- "sync"
-
- utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-)
-
-type Info struct {
- Indexed bool
- Removed bool
-}
-
-type infoValue struct {
- info Info
- err error
-}
-
-type InfoProvider interface {
- Info(ctx context.Context, id cid.ID) (Info, error)
-}
-
-type infoProvider struct {
- mtx *sync.RWMutex
- cache map[cid.ID]infoValue
- kl *utilSync.KeyLocker[cid.ID]
-
- source Source
- sourceErr error
- sourceOnce *sync.Once
- sourceFactory func() (Source, error)
-}
-
-func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
- return &infoProvider{
- mtx: &sync.RWMutex{},
- cache: make(map[cid.ID]infoValue),
- sourceOnce: &sync.Once{},
- kl: utilSync.NewKeyLocker[cid.ID](),
- sourceFactory: sourceFactory,
- }
-}
-
-func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) {
- v, found := r.tryGetFromCache(id)
- if found {
- return v.info, v.err
- }
-
- return r.getFromSource(ctx, id)
-}
-
-func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
- r.mtx.RLock()
- defer r.mtx.RUnlock()
-
- value, found := r.cache[id]
- return value, found
-}
-
-func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) {
- r.kl.Lock(id)
- defer r.kl.Unlock(id)
-
- if v, ok := r.tryGetFromCache(id); ok {
- return v.info, v.err
- }
-
- r.sourceOnce.Do(func() {
- r.source, r.sourceErr = r.sourceFactory()
- })
- if r.sourceErr != nil {
- return Info{}, r.sourceErr
- }
-
- cnr, err := r.source.Get(ctx, id)
- var civ infoValue
- if err != nil {
- if client.IsErrContainerNotFound(err) {
- removed, err := WasRemoved(ctx, r.source, id)
- if err != nil {
- civ.err = err
- } else {
- civ.info.Removed = removed
- }
- } else {
- civ.err = err
- }
- } else {
- civ.info.Indexed = IsIndexedContainer(cnr.Value)
- }
- r.putToCache(id, civ)
- return civ.info, civ.err
-}
-
-func (r *infoProvider) putToCache(id cid.ID, ct infoValue) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- r.cache[id] = ct
-}
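
Usage sketch for the removed InfoProvider (the source factory here is hypothetical; in the node it is wired to the morph container source at startup):

    provider := NewInfoProvider(func() (Source, error) {
        return newMorphContainerSource() // hypothetical factory
    })
    info, err := provider.Info(ctx, id) // cached after the first lookup per container ID
    if err != nil {
        return err
    }
    if info.Removed {
        // container existed and was deleted
    }
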
diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go
index 4eb14e53c..69854f495 100644
--- a/pkg/core/container/storage.go
+++ b/pkg/core/container/storage.go
@@ -1,8 +1,6 @@
package container
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@@ -43,9 +41,9 @@ type Source interface {
//
// Implementations must not retain the container pointer and modify
// the container through it.
- Get(ctx context.Context, cid cid.ID) (*Container, error)
+ Get(cid.ID) (*Container, error)
- DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error)
+ DeletionInfo(cid.ID) (*DelInfo, error)
}
// EACL groups information about the FrostFS container's extended ACL stored in
@@ -60,3 +58,16 @@ type EACL struct {
// Session within which Value was set. Nil means session absence.
Session *session.Container
}
+
+// EACLSource is the interface that wraps
+// basic methods of an extended ACL table source.
+type EACLSource interface {
+ // GetEACL reads the table from the source by identifier.
+ // It returns any error encountered.
+ //
+ // GetEACL must return exactly one non-nil value.
+ //
+ // Must return apistatus.ErrEACLNotFound if requested
+ // eACL table is not in source.
+ GetEACL(cid.ID) (*EACL, error)
+}
diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go
index 61c568052..98919284e 100644
--- a/pkg/core/container/util.go
+++ b/pkg/core/container/util.go
@@ -1,18 +1,16 @@
package container
import (
- "context"
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// WasRemoved checks whether the container ever existed or
// it just has not been created yet at the current epoch.
-func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) {
- _, err := s.DeletionInfo(ctx, cid)
+func WasRemoved(s Source, cid cid.ID) (bool, error) {
+ _, err := s.DeletionInfo(cid)
if err == nil {
return true, nil
}
@@ -22,14 +20,3 @@ func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) {
}
return false, err
}
-
-// IsIndexedContainer returns True if container attributes should be indexed.
-func IsIndexedContainer(cnr containerSDK.Container) bool {
- var isS3Container bool
- for key := range cnr.Attributes() {
- if key == ".s3-location-constraint" {
- isS3Container = true
- }
- }
- return !isS3Container
-}
diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go
index e752043d3..ecfd0eb15 100644
--- a/pkg/core/frostfsid/subject_provider.go
+++ b/pkg/core/frostfsid/subject_provider.go
@@ -1,8 +1,6 @@
package frostfsid
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -13,6 +11,6 @@ const (
// SubjectProvider interface provides methods to get subject from FrostfsID contract.
type SubjectProvider interface {
- GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error)
- GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error)
+ GetSubject(util.Uint160) (*client.Subject, error)
+ GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error)
}
diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go
index 0c64bb798..29cb2dc94 100644
--- a/pkg/core/netmap/keys.go
+++ b/pkg/core/netmap/keys.go
@@ -2,6 +2,6 @@ package netmap
// AnnouncedKeys is a utility interface for working with the announced public keys of storage nodes.
type AnnouncedKeys interface {
- // IsLocalKey checks if the key was announced by a local node.
+ // Checks if the key was announced by a local node.
IsLocalKey(key []byte) bool
}
diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go
index e58e42634..b0c9e1f9e 100644
--- a/pkg/core/netmap/nodes.go
+++ b/pkg/core/netmap/nodes.go
@@ -1,10 +1,6 @@
package netmap
-import (
- "iter"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
// Node is a named type of netmap.NodeInfo which provides the interface needed
// in the current repository. Node is expected to be used everywhere instead
@@ -18,20 +14,10 @@ func (x Node) PublicKey() []byte {
return (netmap.NodeInfo)(x).PublicKey()
}
-// Addresses returns an iterator over all announced network addresses.
-func (x Node) Addresses() iter.Seq[string] {
- return (netmap.NodeInfo)(x).NetworkEndpoints()
-}
-
// IterateAddresses iterates over all announced network addresses
// and passes them into f. Handler MUST NOT be nil.
-// Deprecated: use [Node.Addresses] instead.
func (x Node) IterateAddresses(f func(string) bool) {
- for s := range (netmap.NodeInfo)(x).NetworkEndpoints() {
- if f(s) {
- return
- }
- }
+ (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
}
// NumberOfAddresses returns number of announced network addresses.
diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go
index 97313da84..7770c61c7 100644
--- a/pkg/core/netmap/storage.go
+++ b/pkg/core/netmap/storage.go
@@ -1,8 +1,6 @@
package netmap
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -18,7 +16,7 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error)
+ GetNetMap(diff uint64) (*netmap.NetMap, error)
// GetNetMapByEpoch reads network map by the epoch number from the storage.
// It returns the pointer to the requested network map and any error encountered.
@@ -27,21 +25,21 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error)
+ GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error)
// Epoch reads the current epoch from the storage.
// It returns the number of the current epoch and any error encountered.
//
// Must return exactly one non-default value.
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
// GetLatestNetworkMap requests and returns the latest network map from the storage.
-func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(ctx, 0)
+func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(0)
}
// GetPreviousNetworkMap requests and returns the network map preceding the latest one from the storage.
-func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(ctx, 1)
+func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(1)
}
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index cf090eb37..96f721806 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -8,11 +8,11 @@ import (
"fmt"
"strconv"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
}
if !unprepared {
- if err := v.validateSignatureKey(ctx, obj); err != nil {
+ if err := v.validateSignatureKey(obj); err != nil {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
@@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
return nil
}
-func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
+func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
return errMissingSignature
@@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectS
ownerID := obj.OwnerID()
if token == nil && obj.ECHeader() != nil {
- role, err := v.isIROrContainerNode(ctx, obj, binKey)
+ role, err := v.isIROrContainerNode(obj, binKey)
if err != nil {
return err
}
@@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectS
}
if v.verifyTokenIssuer {
- role, err := v.isIROrContainerNode(ctx, obj, binKey)
+ role, err := v.isIROrContainerNode(obj, binKey)
if err != nil {
return err
}
@@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectS
return nil
}
-func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
+func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
return acl.RoleOthers, errNilCID
@@ -199,12 +199,12 @@ func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSD
cnrIDBin := make([]byte, sha256.Size)
cnrID.Encode(cnrIDBin)
- cnr, err := v.containers.Get(ctx, cnrID)
+ cnr, err := v.containers.Get(cnrID)
if err != nil {
return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}
- res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
+ res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
if err != nil {
return acl.RoleOthers, err
}
@@ -361,7 +361,7 @@ func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Obj
func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
for _, a := range obj.Attributes() {
- if a.Key() != objectV2.SysAttributeExpEpoch {
+ if a.Key() != objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS {
continue
}
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go
index dc336eb34..77afbfc45 100644
--- a/pkg/core/object/fmt_test.go
+++ b/pkg/core/object/fmt_test.go
@@ -7,10 +7,9 @@ import (
"strconv"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -66,7 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
ownerKey, err := keys.NewPrivateKey()
@@ -291,7 +290,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(false),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
@@ -340,7 +339,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
@@ -411,14 +410,14 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
+ &testNetmapSource{
+ netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
- CurrentEpoch: curEpoch,
+ currentEpoch: curEpoch,
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -484,15 +483,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
+ &testNetmapSource{
+ netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
- CurrentEpoch: curEpoch,
+ currentEpoch: curEpoch,
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -560,15 +559,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
+ &testNetmapSource{
+ netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
- CurrentEpoch: curEpoch,
+ currentEpoch: curEpoch,
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.Error(t, v.Validate(context.Background(), obj, false))
@@ -579,7 +578,7 @@ type testIRSource struct {
irNodes [][]byte
}
-func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) {
+func (s *testIRSource) InnerRingKeys() ([][]byte, error) {
return s.irNodes, nil
}
@@ -587,13 +586,36 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container
}
-func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
-func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
return nil, nil
}
+
+type testNetmapSource struct {
+ netmaps map[uint64]*netmap.NetMap
+ currentEpoch uint64
+}
+
+func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) {
+ if diff >= s.currentEpoch {
+ return nil, fmt.Errorf("invalid diff")
+ }
+ return s.GetNetMapByEpoch(s.currentEpoch - diff)
+}
+
+func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
+ if nm, found := s.netmaps[epoch]; found {
+ return nm, nil
+ }
+ return nil, fmt.Errorf("netmap not found")
+}
+
+func (s *testNetmapSource) Epoch() (uint64, error) {
+ return s.currentEpoch, nil
+}
diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go
index aab12ebf9..67c9a3188 100644
--- a/pkg/core/object/info.go
+++ b/pkg/core/object/info.go
@@ -13,13 +13,6 @@ type ECInfo struct {
Total uint32
}
-func (v *ECInfo) String() string {
- if v == nil {
- return ""
- }
- return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total)
-}
-
// Info groups object address with its FrostFS
// object info.
type Info struct {
@@ -30,5 +23,5 @@ type Info struct {
}
func (v Info) String() string {
- return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo)
+ return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject)
}
diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go
index 3733ed507..13d0ebfb1 100644
--- a/pkg/core/object/sender_classifier.go
+++ b/pkg/core/object/sender_classifier.go
@@ -2,7 +2,6 @@ package object
import (
"bytes"
- "context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -18,7 +17,7 @@ import (
)
type InnerRing interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
+ InnerRingKeys() ([][]byte, error)
}
type SenderClassifier struct {
@@ -41,7 +40,6 @@ type ClassifyResult struct {
}
func (c SenderClassifier) Classify(
- ctx context.Context,
ownerID *user.ID,
ownerKey *keys.PublicKey,
idCnr cid.ID,
@@ -59,15 +57,15 @@ func (c SenderClassifier) Classify(
}, nil
}
- return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
+ return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
}
-func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
- isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes)
+func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
+ isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
- c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
- zap.Error(err))
+ c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
+ zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
@@ -78,13 +76,13 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
binCnr := make([]byte, sha256.Size)
idCnr.Encode(binCnr)
- isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr)
+ isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr)
if err != nil {
// error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so
// do not throw error, try best case matching
- c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
- zap.Error(err))
+ c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
+ zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,
@@ -99,8 +97,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
}, nil
}
-func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) {
- innerRingKeys, err := c.innerRing.InnerRingKeys(ctx)
+func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
+ innerRingKeys, err := c.innerRing.InnerRingKeys()
if err != nil {
return false, err
}
@@ -116,11 +114,10 @@ func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (boo
}
func (c SenderClassifier) isContainerKey(
- ctx context.Context,
owner, idCnr []byte,
cnr container.Container,
) (bool, error) {
- nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap
+ nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
if err != nil {
return false, err
}
@@ -134,7 +131,7 @@ func (c SenderClassifier) isContainerKey(
// then check the previous netmap; this can happen in between epoch changes,
// when a node migrates data from a container of the last epoch
- nm, err = core.GetPreviousNetworkMap(ctx, c.netmap)
+ nm, err = core.GetPreviousNetworkMap(c.netmap)
if err != nil {
return false, err
}
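
The inner ring check above boils down to a byte-wise membership test over the fetched key list; a standalone sketch of that test (the helper name is invented):

    package main

    import (
        "bytes"
        "fmt"
    )

    // containsKey is a hypothetical standalone version of the membership
    // test isInnerRingKey performs over the fetched inner ring keys.
    func containsKey(keys [][]byte, owner []byte) bool {
        for i := range keys {
            if bytes.Equal(keys[i], owner) {
                return true
            }
        }
        return false
    }

    func main() {
        irKeys := [][]byte{{0x02, 0xaa}, {0x03, 0xbb}}
        fmt.Println(containsKey(irKeys, []byte{0x03, 0xbb})) // true
    }
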
diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go
index dfada764a..c4de07a5f 100644
--- a/pkg/innerring/bindings.go
+++ b/pkg/innerring/bindings.go
@@ -8,6 +8,7 @@ type (
// ContractProcessor interface defines functions for binding event producers
// such as event.Listener and Timers to a contract processor.
ContractProcessor interface {
+ ListenerNotificationParsers() []event.NotificationParserInfo
ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo
@@ -15,6 +16,11 @@ type (
)
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
+ // register notification parsers
+ for _, parser := range p.ListenerNotificationParsers() {
+ l.SetNotificationParser(parser)
+ }
+
// register notification handlers
for _, handler := range p.ListenerNotificationHandlers() {
l.RegisterNotificationHandler(handler)
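
A no-op stub (type name invented, this file's event import assumed) satisfying the extended ContractProcessor interface, to show what connectListenerWithProcessor now iterates over:

    // noopProcessor is a hypothetical stub; all four producer lists are
    // empty, so connectListenerWithProcessor registers nothing for it.
    type noopProcessor struct{}

    func (noopProcessor) ListenerNotificationParsers() []event.NotificationParserInfo { return nil }
    func (noopProcessor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil }
    func (noopProcessor) ListenerNotaryParsers() []event.NotaryParserInfo { return nil }
    func (noopProcessor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return nil }
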
diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go
index 3f9d8df5f..ad69f207b 100644
--- a/pkg/innerring/blocktimer.go
+++ b/pkg/innerring/blocktimer.go
@@ -3,10 +3,14 @@ package innerring
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
+ "go.uber.org/zap"
)
type (
@@ -15,12 +19,28 @@ type (
EpochDuration() uint64
}
+ alphaState interface {
+ IsAlphabet() bool
+ }
+
newEpochHandler func()
+ containerEstimationStopper interface {
+ StopEstimation(p container.StopEstimationPrm) error
+ }
+
epochTimerArgs struct {
+ l *logger.Logger
+
+ alphabetState alphaState
+
newEpochHandlers []newEpochHandler
- epoch epochState // to specify which epoch to stop, and epoch duration
+ cnrWrapper containerEstimationStopper // to invoke stop container estimation
+ epoch epochState // to specify which epoch to stop, and epoch duration
+
+ stopEstimationDMul uint32 // X: X/Y of epoch in blocks
+ stopEstimationDDiv uint32 // Y: X/Y of epoch in blocks
}
emitTimerArgs struct {
@@ -29,7 +49,7 @@ type (
emitDuration uint32 // in blocks
}
- depositor func(context.Context) (util.Uint256, error)
+ depositor func() (util.Uint256, error)
awaiter func(context.Context, util.Uint256) error
)
@@ -54,7 +74,7 @@ func (s *Server) tickTimers(h uint32) {
}
func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
- return timer.NewBlockTimer(
+ epochTimer := timer.NewBlockTimer(
func() (uint32, error) {
return uint32(args.epoch.EpochDuration()), nil
},
@@ -64,13 +84,42 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
}
},
)
+
+ // sub-timer for the epoch timer that ticks stop-container-estimation
+ // events at a fixed block within each epoch
+ epochTimer.OnDelta(
+ args.stopEstimationDMul,
+ args.stopEstimationDDiv,
+ func() {
+ if !args.alphabetState.IsAlphabet() {
+ args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations)
+ return
+ }
+
+ epochN := args.epoch.EpochCounter()
+ if epochN == 0 { // estimates are invalid in genesis epoch
+ return
+ }
+
+ prm := container.StopEstimationPrm{}
+ prm.SetEpoch(epochN - 1)
+
+ err := args.cnrWrapper.StopEstimation(prm)
+ if err != nil {
+ args.l.Warn(logs.InnerringCantStopEpochEstimation,
+ zap.Uint64("epoch", epochN),
+ zap.String("error", err.Error()))
+ }
+ })
+
+ return epochTimer
}
-func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer {
+func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer {
return timer.NewBlockTimer(
timer.StaticBlockMeter(args.emitDuration),
func() {
- args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{})
+ args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{})
},
)
}
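
The mul/div pair fixes where in the epoch the estimation stopper fires; a standalone sketch of the arithmetic, with the constants borrowed from the test below:

    package main

    import "fmt"

    func main() {
        // Epoch duration 10 blocks, stop-estimation ratio 2/10: the
        // sub-timer fires 2 blocks into each epoch window, matching the
        // test ticks at heights 101, 111, ...
        const duration, mul, div = 10, 2, 10
        fmt.Println("stop-estimation offset in blocks:", duration*mul/div) // 2
    }
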
diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go
index 4cbe7e394..242c0903b 100644
--- a/pkg/innerring/blocktimer_test.go
+++ b/pkg/innerring/blocktimer_test.go
@@ -3,20 +3,29 @@ package innerring
import (
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
)
func TestEpochTimer(t *testing.T) {
t.Parallel()
+ alphaState := &testAlphabetState{isAlphabet: true}
neh := &testNewEpochHandler{}
+ cnrStopper := &testContainerEstStopper{}
epochState := &testEpochState{
counter: 99,
duration: 10,
}
args := &epochTimerArgs{
- newEpochHandlers: []newEpochHandler{neh.Handle},
- epoch: epochState,
+ l: test.NewLogger(t),
+ alphabetState: alphaState,
+ newEpochHandlers: []newEpochHandler{neh.Handle},
+ cnrWrapper: cnrStopper,
+ epoch: epochState,
+ stopEstimationDMul: 2,
+ stopEstimationDDiv: 10,
}
et := newEpochTimer(args)
err := et.Reset()
@@ -24,43 +33,63 @@ func TestEpochTimer(t *testing.T) {
et.Tick(100)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 0, cnrStopper.called, "invalid container stop handler calls")
et.Tick(101)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(102)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(103)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
var h uint32
for h = 104; h < 109; h++ {
et.Tick(h)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
}
et.Tick(109)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(110)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(111)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
et.Tick(112)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
et.Tick(113)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
for h = 114; h < 119; h++ {
et.Tick(h)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
}
et.Tick(120)
require.Equal(t, 2, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
+}
+
+type testAlphabetState struct {
+ isAlphabet bool
+}
+
+func (s *testAlphabetState) IsAlphabet() bool {
+ return s.isAlphabet
}
type testNewEpochHandler struct {
@@ -71,6 +100,15 @@ func (h *testNewEpochHandler) Handle() {
h.called++
}
+type testContainerEstStopper struct {
+ called int
+}
+
+func (s *testContainerEstStopper) StopEstimation(_ container.StopEstimationPrm) error {
+ s.called++
+ return nil
+}
+
type testEpochState struct {
counter uint64
duration uint64
diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go
index 7deec3f31..4a80ebf3b 100644
--- a/pkg/innerring/fetcher.go
+++ b/pkg/innerring/fetcher.go
@@ -1,8 +1,6 @@
package innerring
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -49,12 +47,12 @@ type IrFetcherWithoutNotary struct {
// InnerRingKeys fetches the list of inner ring keys from the NeoFSAlphabet
// role in the sidechain.
-func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
- return fN.cli.NeoFSAlphabetList(ctx)
+func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) {
+ return fN.cli.NeoFSAlphabetList()
}
// InnerRingKeys fetches the list of inner ring keys from the netmap contract
// in the sidechain.
-func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
- return f.nm.GetInnerRingList(ctx)
+func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) {
+ return f.nm.GetInnerRingList()
}
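
After this change both fetcher variants satisfy the context-free irFetcher interface defined in indexer.go; a hypothetical compile-time check (same package assumed):

    // Compile-time checks: both fetchers implement irFetcher.
    var (
        _ irFetcher = IrFetcherWithNotary{}
        _ irFetcher = IrFetcherWithoutNotary{}
    )
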
diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go
index 439400bac..45135a57b 100644
--- a/pkg/innerring/indexer.go
+++ b/pkg/innerring/indexer.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"fmt"
"sync"
"time"
@@ -11,7 +10,7 @@ import (
type (
irFetcher interface {
- InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
+ InnerRingKeys() (keys.PublicKeys, error)
}
committeeFetcher interface {
@@ -46,7 +45,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK
}
}
-func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) {
+func (s *innerRingIndexer) update() (ind indexes, err error) {
s.RLock()
if time.Since(s.lastAccess) < s.timeout {
@@ -63,7 +62,7 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
return s.ind, nil
}
- innerRing, err := s.irFetcher.InnerRingKeys(ctx)
+ innerRing, err := s.irFetcher.InnerRingKeys()
if err != nil {
return indexes{}, err
}
@@ -82,8 +81,8 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
return s.ind, nil
}
-func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
- ind, err := s.update(ctx)
+func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
+ ind, err := s.update()
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -91,8 +90,8 @@ func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
return ind.innerRingIndex, nil
}
-func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
- ind, err := s.update(ctx)
+func (s *innerRingIndexer) InnerRingSize() (int32, error) {
+ ind, err := s.update()
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -100,8 +99,8 @@ func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
return ind.innerRingSize, nil
}
-func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) {
- ind, err := s.update(ctx)
+func (s *innerRingIndexer) AlphabetIndex() (int32, error) {
+ ind, err := s.update()
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
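
update() is a read-through cache with double-checked locking; a standalone sketch of the same pattern under invented names:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type cache struct {
        sync.RWMutex
        lastAccess time.Time
        timeout    time.Duration
        value      int
    }

    // get serves the cached value under the read lock while it is fresh,
    // otherwise upgrades to the write lock, re-checks, and refreshes.
    func (c *cache) get(refresh func() int) int {
        c.RLock()
        if time.Since(c.lastAccess) < c.timeout {
            defer c.RUnlock()
            return c.value
        }
        c.RUnlock()

        c.Lock()
        defer c.Unlock()
        if time.Since(c.lastAccess) < c.timeout { // another goroutine refreshed first
            return c.value
        }
        c.value = refresh()
        c.lastAccess = time.Now()
        return c.value
    }

    func main() {
        c := &cache{timeout: time.Second}
        fmt.Println(c.get(func() int { return 42 })) // 42
    }
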
diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go
index f8201b7df..c8a819b5b 100644
--- a/pkg/innerring/indexer_test.go
+++ b/pkg/innerring/indexer_test.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"fmt"
"sync/atomic"
"testing"
@@ -38,15 +37,15 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(2), idx, "invalid IR index")
- size, err := indexer.InnerRingSize(context.Background())
+ size, err := indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(3), size, "invalid IR size")
})
@@ -57,11 +56,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(0), idx, "invalid IR index")
})
@@ -72,11 +71,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
})
@@ -101,30 +100,30 @@ func TestIndexerCachesIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err := indexer.InnerRingSize(context.Background())
+ size, err := indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
require.Equal(t, int32(1), cf.calls.Load(), "invalid committee calls count")
require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count")
- idx, err = indexer.AlphabetIndex(context.Background())
+ idx, err = indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err = indexer.InnerRingSize(context.Background())
+ size, err = indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -133,15 +132,15 @@ func TestIndexerCachesIndexes(t *testing.T) {
time.Sleep(2 * time.Second)
- idx, err = indexer.AlphabetIndex(context.Background())
+ idx, err = indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err = indexer.InnerRingSize(context.Background())
+ size, err = indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -166,15 +165,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index")
- size, err := indexer.InnerRingSize(context.Background())
+ size, err := indexer.InnerRingSize()
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -190,15 +189,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer = newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err = indexer.AlphabetIndex(context.Background())
+ idx, err = indexer.AlphabetIndex()
require.ErrorContains(t, err, "test IR error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index")
- size, err = indexer.InnerRingSize(context.Background())
+ size, err = indexer.InnerRingSize()
require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size")
}
@@ -220,7 +219,7 @@ type testIRFetcher struct {
calls atomic.Int32
}
-func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
f.calls.Add(1)
return f.keys, f.err
}
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 3d236641e..7da0a9794 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -35,30 +35,31 @@ import (
"google.golang.org/grpc"
)
-func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
+func (s *Server) initNetmapProcessor(cfg *viper.Viper,
+ cnrClient *container.Client,
alphaSync event.Handler,
) error {
- locodeValidator := s.newLocodeValidator(cfg)
+ locodeValidator, err := s.newLocodeValidator(cfg)
+ if err != nil {
+ return err
+ }
netSettings := (*networkSettings)(s.netmapClient)
var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
netMapCandidateStateValidator.SetNetworkSettings(netSettings)
- poolSize := cfg.GetInt("workers.netmap")
- s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
-
- var err error
s.netmapProcessor, err = netmap.New(&netmap.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.netmap"),
NetmapClient: netmap.NewNetmapClient(s.netmapClient),
EpochTimer: s,
EpochState: s,
AlphabetState: s,
CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"),
CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"),
+ ContainerWrapper: cnrClient,
NotaryDepositHandler: s.onlyAlphabetEventHandler(
s.notaryHandler,
),
@@ -98,7 +99,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
- s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
+ s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock
@@ -138,12 +139,12 @@ func (s *Server) enableNotarySupport() error {
return nil
}
-func (s *Server) initNotaryConfig(ctx context.Context) {
+func (s *Server) initNotaryConfig() {
s.mainNotaryConfig = notaryConfigs(
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if the mainnet is disabled, the notary flag must be disabled too
)
- s.log.Info(ctx, logs.InnerringNotarySupport,
+ s.log.Info(logs.InnerringNotarySupport,
zap.Bool("sidechain_enabled", true),
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
)
@@ -153,15 +154,16 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
var alphaSync event.Handler
if s.withoutMainNet || cfg.GetBool("governance.disable") {
- alphaSync = func(ctx context.Context, _ event.Event) {
- s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled)
+ alphaSync = func(event.Event) {
+ s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
}
} else {
// create governance processor
governanceProcessor, err := governance.New(&governance.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
FrostFSClient: frostfsCli,
+ NetmapClient: s.netmapClient,
AlphabetState: s,
EpochState: s,
Voter: s,
@@ -197,16 +199,21 @@ func (s *Server) createIRFetcher() irFetcher {
return irf
}
-func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
+func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients) {
s.epochTimer = newEpochTimer(&epochTimerArgs{
- newEpochHandlers: s.newEpochTickHandlers(ctx),
- epoch: s,
+ l: s.log,
+ alphabetState: s,
+ newEpochHandlers: s.newEpochTickHandlers(),
+ cnrWrapper: morphClients.CnrClient,
+ epoch: s,
+ stopEstimationDMul: cfg.GetUint32("timers.stop_estimation.mul"),
+ stopEstimationDDiv: cfg.GetUint32("timers.stop_estimation.div"),
})
s.addBlockTimer(s.epochTimer)
// initialize emission timer
- emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{
+ emissionTimer := newEmissionTimer(&emitTimerArgs{
ap: s.alphabetProcessor,
emitDuration: cfg.GetUint32("timers.emit"),
})
@@ -214,20 +221,18 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
s.addBlockTimer(emissionTimer)
}
-func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error {
+func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
if err != nil {
return err
}
- poolSize := cfg.GetInt("workers.alphabet")
- s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize))
// create alphabet processor
s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
ParsedWallets: parsedWallets,
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.alphabet"),
AlphabetContracts: s.contracts.alphabet,
NetmapClient: s.netmapClient,
MorphClient: s.morphClient,
@@ -242,14 +247,12 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er
return err
}
-func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
- poolSize := cfg.GetInt("workers.container")
- s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
+func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
// container processor
containerProcessor, err := cont.New(&cont.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.container"),
AlphabetState: s,
ContainerClient: cnrClient,
MorphClient: cnrClient.Morph(),
@@ -263,14 +266,12 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c
return bindMorphProcessor(containerProcessor, s)
}
-func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
- poolSize := cfg.GetInt("workers.balance")
- s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
+func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
// create balance processor
balanceProcessor, err := balance.New(&balance.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.balance"),
FrostFSClient: frostfsCli,
BalanceSC: s.contracts.balance,
AlphabetState: s,
@@ -283,17 +284,15 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro
return bindMorphProcessor(balanceProcessor, s)
}
-func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error {
+func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
if s.withoutMainNet {
return nil
}
- poolSize := cfg.GetInt("workers.frostfs")
- s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
frostfsProcessor, err := frostfs.New(&frostfs.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.frostfs"),
FrostFSContract: s.contracts.frostfs,
BalanceClient: s.balanceClient,
NetmapClient: s.netmapClient,
@@ -313,10 +312,10 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip
return bindMainnetProcessor(frostfsProcessor, s)
}
-func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
+func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
- s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified)
+ s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
return nil
}
@@ -342,7 +341,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg
controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient,
controlsrv.WithAllowedKeys(authKeys),
- ), log.WithTag(logger.TagGrpcSvc), audit)
+ ), log, audit)
grpcControlSrv := grpc.NewServer()
control.RegisterControlServiceServer(grpcControlSrv, controlSvc)
@@ -378,6 +377,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
// form morph container client's options
morphCnrOpts := make([]container.Option, 0, 3)
morphCnrOpts = append(morphCnrOpts,
+ container.TryNotary(),
container.AsAlphabet(),
)
@@ -387,12 +387,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
}
s.containerClient = result.CnrClient
- s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet())
+ s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
if err != nil {
return nil, err
}
- s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet())
+ s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
if err != nil {
return nil, err
}
@@ -411,7 +411,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
return result, nil
}
-func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error {
+func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error {
irf := s.createIRFetcher()
s.statusIndex = newInnerRingIndexer(
@@ -426,27 +426,27 @@ func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClie
return err
}
- err = s.initNetmapProcessor(ctx, cfg, alphaSync)
+ err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync)
if err != nil {
return err
}
- err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
+ err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
if err != nil {
return err
}
- err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient)
+ err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient)
if err != nil {
return err
}
- err = s.initFrostFSMainnetProcessor(ctx, cfg)
+ err = s.initFrostFSMainnetProcessor(cfg)
if err != nil {
return err
}
- err = s.initAlphabetProcessor(ctx, cfg)
+ err = s.initAlphabetProcessor(cfg)
return err
}
@@ -454,17 +454,16 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+ s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
morphChain := &chainParams{
- log: s.log.WithTag(logger.TagMorph),
+ log: s.log,
cfg: cfg,
key: s.key,
name: morphPrefix,
from: fromSideChainBlock,
morphCacheMetric: s.irMetrics.MorphCacheMetrics(),
- multinetMetrics: s.irMetrics.Multinet(),
}
// create morph client
@@ -479,7 +478,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
return nil, err
}
if err := s.morphClient.SetGroupSignerScope(); err != nil {
- morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
+ morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
}
return morphChain, nil
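
The restored estimation sub-timer is driven by two config keys read in initTimers; a standalone sketch of reading them with viper (the defaults here are invented for the sketch):

    package main

    import (
        "fmt"

        "github.com/spf13/viper"
    )

    func main() {
        cfg := viper.New()
        // Invented defaults; real values come from the inner ring config file.
        cfg.SetDefault("timers.stop_estimation.mul", 2)
        cfg.SetDefault("timers.stop_estimation.div", 10)
        fmt.Println(cfg.GetUint32("timers.stop_estimation.mul"),
            cfg.GetUint32("timers.stop_estimation.div")) // 2 10
    }
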
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 3a5137261..50a37845b 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
@@ -104,8 +103,6 @@ type (
// to the application.
runners []func(chan<- error) error
- // cmode used for upgrade scenario.
- // nolint:unused
cmode *atomic.Bool
}
@@ -117,7 +114,6 @@ type (
sgn *transaction.Signer
from uint32 // block height
morphCacheMetric metrics.MorphCacheMetrics
- multinetMetrics metrics.MultinetMetrics
}
)
@@ -140,10 +136,10 @@ var (
// Start runs all event providers.
func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
- s.setHealthStatus(ctx, control.HealthStatus_STARTING)
+ s.setHealthStatus(control.HealthStatus_STARTING)
defer func() {
if err == nil {
- s.setHealthStatus(ctx, control.HealthStatus_READY)
+ s.setHealthStatus(control.HealthStatus_READY)
}
}()
@@ -152,12 +148,12 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
return err
}
- err = s.initConfigFromBlockchain(ctx)
+ err = s.initConfigFromBlockchain()
if err != nil {
return err
}
- if s.IsAlphabet(ctx) {
+ if s.IsAlphabet() {
err = s.initMainNotary(ctx)
if err != nil {
return err
@@ -173,14 +169,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
prm.Validators = s.predefinedValidators
// vote for sidechain validator if it is prepared in config
- err = s.voteForSidechainValidator(ctx, prm)
+ err = s.voteForSidechainValidator(prm)
if err != nil {
// we don't stop inner ring execution on this error
- s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
- zap.Error(err))
+ s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
+ zap.String("error", err.Error()))
}
- s.tickInitialExpoch(ctx)
+ s.tickInitialExpoch()
morphErr := make(chan error)
mainnnetErr := make(chan error)
@@ -217,14 +213,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
}
func (s *Server) registerMorphNewBlockEventHandler() {
- s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
- s.log.Debug(ctx, logs.InnerringNewBlock,
+ s.morphListener.RegisterBlockHandler(func(b *block.Block) {
+ s.log.Debug(logs.InnerringNewBlock,
zap.Uint32("index", b.Index),
)
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
+ s.log.Warn(logs.InnerringCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", b.Index))
}
@@ -235,10 +231,10 @@ func (s *Server) registerMorphNewBlockEventHandler() {
func (s *Server) registerMainnetNewBlockEventHandler() {
if !s.withoutMainNet {
- s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
+ s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
+ s.log.Warn(logs.InnerringCantUpdatePersistentState,
zap.String("chain", "main"),
zap.Uint32("block_index", b.Index))
}
@@ -283,11 +279,11 @@ func (s *Server) initSideNotary(ctx context.Context) error {
)
}
-func (s *Server) tickInitialExpoch(ctx context.Context) {
+func (s *Server) tickInitialExpoch() {
initialEpochTicker := timer.NewOneTickTimer(
timer.StaticBlockMeter(s.initialEpochTickDelta),
func() {
- s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
+ s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
})
s.addBlockTimer(initialEpochTicker)
}
@@ -299,16 +295,16 @@ func (s *Server) startWorkers(ctx context.Context) {
}
// Stop closes all subscription channels.
-func (s *Server) Stop(ctx context.Context) {
- s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
+func (s *Server) Stop() {
+ s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
go s.morphListener.Stop()
go s.mainnetListener.Stop()
for _, c := range s.closers {
if err := c(); err != nil {
- s.log.Warn(ctx, logs.InnerringCloserError,
- zap.Error(err),
+ s.log.Warn(logs.InnerringCloserError,
+ zap.String("error", err.Error()),
)
}
}
@@ -339,7 +335,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
) (*Server, error) {
var err error
server := &Server{
- log: log.WithTag(logger.TagIr),
+ log: log,
irMetrics: metrics,
cmode: cmode,
}
@@ -349,7 +345,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
- server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED)
+ server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED)
// parse notary support
server.feeConfig = config.NewFeeConfig(cfg)
@@ -376,7 +372,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
- server.initNotaryConfig(ctx)
+ server.initNotaryConfig()
err = server.initContracts(cfg)
if err != nil {
@@ -400,14 +396,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
- err = server.initProcessors(ctx, cfg, morphClients)
+ err = server.initProcessors(cfg, morphClients)
if err != nil {
return nil, err
}
- server.initTimers(ctx, cfg)
+ server.initTimers(cfg, morphClients)
- err = server.initGRPCServer(ctx, cfg, log, audit)
+ err = server.initGRPCServer(cfg, log, audit)
if err != nil {
return nil, err
}
@@ -438,7 +434,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
}
listener, err := event.NewListener(event.ListenerParams{
- Logger: p.log.With(zap.String("chain", p.name)),
+ Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
Subscriber: sub,
})
if err != nil {
@@ -488,12 +484,6 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
return nil, fmt.Errorf("%s chain client endpoints not provided", p.name)
}
- nc := parseMultinetConfig(p.cfg, p.multinetMetrics)
- ds, err := internalNet.NewDialerSource(nc)
- if err != nil {
- return nil, fmt.Errorf("dialer source: %w", err)
- }
-
return client.New(
ctx,
p.key,
@@ -506,7 +496,6 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
}),
client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
client.WithMorphCacheMetrics(p.morphCacheMetric),
- client.WithDialerSource(ds),
)
}
@@ -551,43 +540,21 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) {
return extraWallets, nil
}
-func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config {
- nc := internalNet.Config{
- Enabled: cfg.GetBool("multinet.enabled"),
- Balancer: cfg.GetString("multinet.balancer"),
- Restrict: cfg.GetBool("multinet.restrict"),
- FallbackDelay: cfg.GetDuration("multinet.fallback_delay"),
- Metrics: m,
- }
- for i := 0; ; i++ {
- mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i))
- if mask == "" {
- break
- }
- sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i))
- nc.Subnets = append(nc.Subnets, internalNet.Subnet{
- Prefix: mask,
- SourceIPs: sourceIPs,
- })
- }
- return nc
-}
-
-func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
+func (s *Server) initConfigFromBlockchain() error {
// get current epoch
- epoch, err := s.netmapClient.Epoch(ctx)
+ epoch, err := s.netmapClient.Epoch()
if err != nil {
return fmt.Errorf("can't read epoch number: %w", err)
}
// get current epoch duration
- epochDuration, err := s.netmapClient.EpochDuration(ctx)
+ epochDuration, err := s.netmapClient.EpochDuration()
if err != nil {
return fmt.Errorf("can't read epoch duration: %w", err)
}
// get balance precision
- balancePrecision, err := s.balanceClient.Decimals(ctx)
+ balancePrecision, err := s.balanceClient.Decimals()
if err != nil {
return fmt.Errorf("can't read balance contract precision: %w", err)
}
@@ -597,14 +564,14 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
s.precision.SetBalancePrecision(balancePrecision)
// get next epoch delta tick
- s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx)
+ s.initialEpochTickDelta, err = s.nextEpochBlockDelta()
if err != nil {
return err
}
- s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain,
- zap.Bool("active", s.IsActive(ctx)),
- zap.Bool("alphabet", s.IsAlphabet(ctx)),
+ s.log.Debug(logs.InnerringReadConfigFromBlockchain,
+ zap.Bool("active", s.IsActive()),
+ zap.Bool("alphabet", s.IsAlphabet()),
zap.Uint64("epoch", epoch),
zap.Uint32("precision", balancePrecision),
zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta),
@@ -613,8 +580,8 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
return nil
}
-func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) {
- epochBlock, err := s.netmapClient.LastEpochBlock(ctx)
+func (s *Server) nextEpochBlockDelta() (uint32, error) {
+ epochBlock, err := s.netmapClient.LastEpochBlock()
if err != nil {
return 0, fmt.Errorf("can't read last epoch block: %w", err)
}
@@ -635,17 +602,17 @@ func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) {
// onlyAlphabetEventHandler wraps an event handler so that it executes
// only if the inner ring node is an alphabet node.
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler {
- return func(ctx context.Context, ev event.Event) {
- if s.IsAlphabet(ctx) {
- f(ctx, ev)
+ return func(ev event.Event) {
+ if s.IsAlphabet() {
+ f(ev)
}
}
}
-func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler {
+func (s *Server) newEpochTickHandlers() []newEpochHandler {
newEpochHandlers := []newEpochHandler{
func() {
- s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
+ s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
},
}
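
onlyAlphabetEventHandler is a guard wrapper; a standalone sketch of the pattern with invented names:

    package main

    import "fmt"

    type handler func(ev any)

    // onlyIf mimics the guard-wrapper shape of onlyAlphabetEventHandler:
    // the inner handler runs only while the predicate holds.
    func onlyIf(pred func() bool, f handler) handler {
        return func(ev any) {
            if pred() {
                f(ev)
            }
        }
    }

    func main() {
        h := onlyIf(func() bool { return true },
            func(ev any) { fmt.Println("handled:", ev) })
        h("epoch tick")
    }
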
diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go
index ae4c85168..a0c3ea751 100644
--- a/pkg/innerring/locode.go
+++ b/pkg/innerring/locode.go
@@ -9,7 +9,7 @@ import (
"github.com/spf13/viper"
)
-func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
+func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
locodeDB := locodebolt.New(locodebolt.Prm{
Path: cfg.GetString("locode.db.path"),
},
@@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
return irlocode.New(irlocode.Prm{
DB: (*locodeBoltDBWrapper)(locodeDB),
- })
+ }), nil
}
type locodeBoltEntryWrapper struct {
diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go
index fb11e9426..9961710ca 100644
--- a/pkg/innerring/netmap.go
+++ b/pkg/innerring/netmap.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -18,8 +17,8 @@ type networkSettings netmapclient.Client
// MaintenanceModeAllowed requests network configuration from the Sidechain
// and checks whether the storage node's maintenance mode is allowed according to it.
// Always returns state.ErrMaintenanceModeDisallowed.
-func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error {
- allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx)
+func (s *networkSettings) MaintenanceModeAllowed() error {
+ allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed()
if err != nil {
return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
} else if allowed {
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index c8a69962f..e6f2b1de4 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -28,39 +28,37 @@ const (
gasDivisor = 2
)
-func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
+func (s *Server) depositMainNotary() (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
}
return s.mainnetClient.DepositNotary(
- ctx,
depositAmount,
uint32(s.epochDuration.Load())+notaryExtraBlocks,
)
}
-func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
+func (s *Server) depositSideNotary() (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
}
- tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
- return tx, err
+ return s.morphClient.DepositEndlessNotary(depositAmount)
}
-func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
+func (s *Server) notaryHandler(_ event.Event) {
if !s.mainNotaryConfig.disabled {
- _, err := s.depositMainNotary(ctx)
+ _, err := s.depositMainNotary()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
+ s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
}
}
- if _, err := s.depositSideNotary(ctx); err != nil {
- s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
+ if _, err := s.depositSideNotary(); err != nil {
+ s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
}
}
@@ -73,7 +71,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er
}
func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
- tx, err := deposit(ctx)
+ tx, err := deposit()
if err != nil {
return err
}
@@ -82,11 +80,11 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait for it.
- s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
+ s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
return nil
}
- s.log.Info(ctx, msg)
+ s.log.Info(msg)
return await(ctx, tx)
}
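
With depositor now context-free, the side-chain wiring reads as below; a hedged fragment using only signatures visible in this patch (the log message is invented):

    // Hypothetical call site: depositSideNotary matches the depositor
    // type directly, while the awaiter still receives the context.
    err := s.initNotary(ctx,
        s.depositSideNotary,
        s.awaitSideNotaryDeposit,
        "waiting for side chain notary deposit",
    )
    if err != nil {
        return err
    }
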
diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go
index d6b653282..9de075f17 100644
--- a/pkg/innerring/processors/alphabet/handlers.go
+++ b/pkg/innerring/processors/alphabet/handlers.go
@@ -1,8 +1,6 @@
package alphabet
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
@@ -10,16 +8,16 @@ import (
"go.uber.org/zap"
)
-func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
+func (ap *Processor) HandleGasEmission(ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
- ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
+ ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
- err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
+ err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
if err != nil {
// here the system can be moved into a controlled degradation stage
- ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
+ ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
zap.Int("capacity", ap.pool.Cap()))
}
}
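
The submit-or-warn pattern above relies on a non-blocking ants pool; a standalone sketch:

    package main

    import (
        "fmt"

        "github.com/panjf2000/ants/v2"
    )

    func main() {
        // Non-blocking pool: Submit fails fast when saturated instead of
        // stalling the event loop, which is what triggers the
        // "worker pool drained" warning above.
        pool, err := ants.NewPool(1, ants.WithNonblocking(true))
        if err != nil {
            panic(err)
        }
        defer pool.Release()

        if err := pool.Submit(func() { /* handle the emit tick */ }); err != nil {
            fmt.Println("pool drained, capacity:", pool.Cap())
        }
    }
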
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
index 1da3c401d..dfda37472 100644
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -1,13 +1,11 @@
package alphabet_test
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -62,7 +60,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -97,7 +95,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160
+ var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -139,7 +137,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -169,7 +167,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160
+ var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -178,7 +176,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
morphClient := &testMorphClient{}
- var nodes []netmap.NodeInfo
+ nodes := []netmap.NodeInfo{}
network := &netmap.NetMap{}
network.SetNodes(nodes)
@@ -200,7 +198,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -221,7 +219,7 @@ type testIndexer struct {
index int
}
-func (i *testIndexer) AlphabetIndex(context.Context) int {
+func (i *testIndexer) AlphabetIndex() int {
return i.index
}
@@ -248,7 +246,7 @@ type testMorphClient struct {
batchTransferedGas []batchTransferGas
}
-func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) {
+func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
c.invokedMethods = append(c.invokedMethods,
invokedMethod{
contract: contract,
@@ -256,7 +254,7 @@ func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee f
method: method,
args: args,
})
- return client.InvokeRes{}, nil
+ return 0, nil
}
func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
@@ -279,6 +277,6 @@ type testNetmapClient struct {
netmap *netmap.NetMap
}
-func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
return c.netmap, nil
}
diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go
index d3d0f83f2..2317f3e98 100644
--- a/pkg/innerring/processors/alphabet/process_emit.go
+++ b/pkg/innerring/processors/alphabet/process_emit.go
@@ -1,7 +1,6 @@
package alphabet
import (
- "context"
"crypto/elliptic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -14,40 +13,40 @@ import (
const emitMethod = "emit"
-func (ap *Processor) processEmit(ctx context.Context) bool {
- index := ap.irList.AlphabetIndex(ctx)
+func (ap *Processor) processEmit() bool {
+ index := ap.irList.AlphabetIndex()
if index < 0 {
- ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
+ ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
return true
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
- ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
+ ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
zap.Int("index", index))
return false
}
// there is no signature collection, so we don't need an extra fee
- _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
+ _, err := ap.morphClient.Invoke(contract, 0, emitMethod)
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
+ ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return false
}
if ap.storageEmission == 0 {
- ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
+ ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
return true
}
- networkMap, err := ap.netmapClient.NetMap(ctx)
+ networkMap, err := ap.netmapClient.NetMap()
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
- zap.Error(err))
+ ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
+ zap.String("error", err.Error()))
return false
}
@@ -59,7 +58,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
ap.pwLock.RUnlock()
extraLen := len(pw)
- ap.log.Debug(ctx, logs.AlphabetGasEmission,
+ ap.log.Debug(logs.AlphabetGasEmission,
zap.Int("network_map", nmLen),
zap.Int("extra_wallets", extraLen))
@@ -69,37 +68,37 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
- ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
+ ap.transferGasToNetmapNodes(nmNodes, gasPerNode)
- ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
+ ap.transferGasToExtraNodes(pw, gasPerNode)
return true
}
-func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
for i := range nmNodes {
keyBytes := nmNodes[i].PublicKey()
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
- zap.Error(err))
+ ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
+ zap.String("error", err.Error()))
continue
}
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
+ ap.log.Warn(logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
}
}
-func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) {
if len(pw) > 0 {
err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
if err != nil {
@@ -107,10 +106,10 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
for i, addr := range pw {
receiversLog[i] = addr.StringLE()
}
- ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
+ ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
}
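
The emission split above is plain integer division across netmap nodes and extra wallets; a standalone worked example (the emission value is borrowed from the package tests):

    package main

    import (
        "fmt"

        "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
    )

    func main() {
        const storageEmission uint64 = 100_000
        nmLen, extraLen := 4, 1
        gasPerNode := fixedn.Fixed8(storageEmission / uint64(nmLen+extraLen))
        fmt.Println(int64(gasPerNode)) // 20000 base units per receiver
    }
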
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
index 0aea74003..ce6679969 100644
--- a/pkg/innerring/processors/alphabet/processor.go
+++ b/pkg/innerring/processors/alphabet/processor.go
@@ -1,26 +1,26 @@
package alphabet
import (
- "context"
"errors"
"fmt"
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
// Indexer is a callback interface for inner ring global state.
Indexer interface {
- AlphabetIndex(context.Context) int
+ AlphabetIndex() int
}
// Contracts is an interface of the storage
@@ -36,11 +36,11 @@ type (
}
netmapClient interface {
- NetMap(ctx context.Context) (*netmap.NetMap, error)
+ NetMap() (*netmap.NetMap, error)
}
morphClient interface {
- Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error)
+ Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
}
@@ -85,6 +85,8 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
+ p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
@@ -114,6 +116,11 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
ap.pwLock.Unlock()
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ return nil
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
index b5d05a02e..e39f3abbd 100644
--- a/pkg/innerring/processors/balance/handlers.go
+++ b/pkg/innerring/processors/balance/handlers.go
@@ -1,7 +1,6 @@
package balance
import (
- "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -11,20 +10,20 @@ import (
"go.uber.org/zap"
)
-func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
+func (bp *Processor) handleLock(ev event.Event) {
lock := ev.(balanceEvent.Lock)
- bp.log.Info(ctx, logs.Notification,
+ bp.log.Info(logs.Notification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
// send an event to the worker pool
err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
- return bp.processLock(ctx, &lock)
+ return bp.processLock(&lock)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained,
+ bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
zap.Int("capacity", bp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go
index 0fd23d8ab..86a9e15d0 100644
--- a/pkg/innerring/processors/balance/handlers_test.go
+++ b/pkg/innerring/processors/balance/handlers_test.go
@@ -1,7 +1,6 @@
package balance
import (
- "context"
"testing"
"time"
@@ -31,7 +30,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
- processor.handleLock(context.Background(), balanceEvent.Lock{})
+ processor.handleLock(balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -57,7 +56,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
- processor.handleLock(context.Background(), balanceEvent.Lock{})
+ processor.handleLock(balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -70,7 +69,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
@@ -84,7 +83,7 @@ type testFrostFSContractClient struct {
chequeCalls int
}
-func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
+func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error {
c.chequeCalls++
return nil
}
diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go
index 60475908c..1d94fa454 100644
--- a/pkg/innerring/processors/balance/process_assets.go
+++ b/pkg/innerring/processors/balance/process_assets.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@@ -11,9 +9,9 @@ import (
// Process a lock event by invoking the Cheque method in the mainnet to send
// assets back to the withdraw issuer.
-func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
- if !bp.alphabetState.IsAlphabet(ctx) {
- bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
+func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
+ if !bp.alphabetState.IsAlphabet() {
+ bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
return true
}
@@ -25,9 +23,9 @@ func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) b
prm.SetLock(lock.LockAccount())
prm.SetHash(lock.TxHash())
- err := bp.frostfsClient.Cheque(ctx, prm)
+ err := bp.frostfsClient.Cheque(prm)
if err != nil {
- bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
+ bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
index 34203b74f..5cc849b5c 100644
--- a/pkg/innerring/processors/balance/processor.go
+++ b/pkg/innerring/processors/balance/processor.go
@@ -1,10 +1,10 @@
package balance
import (
- "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -12,12 +12,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
// PrecisionConverter converts balance amount values.
@@ -26,7 +27,7 @@ type (
}
FrostFSClient interface {
- Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
+ Cheque(p frostfscontract.ChequePrm) error
}
// Processor of events produced by balance contract in the morphchain.
@@ -67,6 +68,8 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
+ p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)
@@ -88,16 +91,32 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ var parsers []event.NotificationParserInfo
+
+ // new lock event
+ lock := event.NotificationParserInfo{}
+ lock.SetType(lockNotification)
+ lock.SetScriptHash(bp.balanceSC)
+ lock.SetParser(balanceEvent.ParseLock)
+ parsers = append(parsers, lock)
+
+ return parsers
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: bp.balanceSC,
- Type: lockNotification,
- Parser: balanceEvent.ParseLock,
- Handlers: []event.Handler{bp.handleLock},
- },
- }
+ var handlers []event.NotificationHandlerInfo
+
+ // lock handler
+ lock := event.NotificationHandlerInfo{}
+ lock.SetType(lockNotification)
+ lock.SetScriptHash(bp.balanceSC)
+ lock.SetHandler(bp.handleLock)
+ handlers = append(handlers, lock)
+
+ return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
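
Aside: this hunk trades a declarative slice literal (Contract/Type/Parser/Handlers in one place) for the older build-by-setters style. A stand-in sketch contrasting the two shapes; the real type is event.NotificationHandlerInfo, everything below is simplified:

package main

import "fmt"

type handlerInfo struct {
	scriptHash [20]byte
	typ        string
	handler    func(ev any)
}

func (i *handlerInfo) SetScriptHash(sh [20]byte) { i.scriptHash = sh }
func (i *handlerInfo) SetType(t string)          { i.typ = t }
func (i *handlerInfo) SetHandler(h func(ev any)) { i.handler = h }

func main() {
	var contract [20]byte

	// Setter style (the "+" side): declare, fill, append.
	var hi handlerInfo
	hi.SetScriptHash(contract)
	hi.SetType("Lock")
	hi.SetHandler(func(ev any) { fmt.Println("lock", ev) })
	handlers := []handlerInfo{hi}

	// Literal style (the "-" side) expresses the same entry in one step,
	// which keeps the registration table-like and easier to scan.
	handlers = append(handlers, handlerInfo{scriptHash: contract, typ: "Lock"})

	fmt.Println(len(handlers), handlers[0].typ)
}
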
diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go
index 5334b9a1f..ba12ebb37 100644
--- a/pkg/innerring/processors/container/common.go
+++ b/pkg/innerring/processors/container/common.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/ecdsa"
"errors"
"fmt"
@@ -46,7 +45,7 @@ type signatureVerificationData struct {
// - v.binPublicKey is a public session key
// - session context corresponds to the container and verb in v
// - session is "alive"
-func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error {
+func (cp *Processor) verifySignature(v signatureVerificationData) error {
var err error
var key frostfsecdsa.PublicKeyRFC6979
keyProvided := v.binPublicKey != nil
@@ -59,7 +58,7 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
}
if len(v.binTokenSession) > 0 {
- return cp.verifyByTokenSession(ctx, v, &key, keyProvided)
+ return cp.verifyByTokenSession(v, &key, keyProvided)
}
if keyProvided {
@@ -78,8 +77,8 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
return errors.New("signature is invalid or calculated with the key not bound to the container owner")
}
-func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error {
- curEpoch, err := cp.netState.Epoch(ctx)
+func (cp *Processor) checkTokenLifetime(token session.Container) error {
+ curEpoch, err := cp.netState.Epoch()
if err != nil {
return fmt.Errorf("could not read current epoch: %w", err)
}
@@ -91,7 +90,7 @@ func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Conta
return nil
}
-func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
+func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
var tok session.Container
err := tok.Unmarshal(v.binTokenSession)
@@ -119,7 +118,7 @@ func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerifi
return errors.New("owner differs with token owner")
}
- err = cp.checkTokenLifetime(ctx, tok)
+ err = cp.checkTokenLifetime(tok)
if err != nil {
return fmt.Errorf("check session lifetime: %w", err)
}
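
Aside: checkTokenLifetime above validates the session token against the current epoch. A self-contained sketch of that rule under a hypothetical token shape (the real session.Container carries nbf/iat/exp claims in the SDK; names here are illustrative):

package main

import (
	"errors"
	"fmt"
)

// token is a stand-in: nbf is the first valid epoch, exp the last one,
// iat the epoch of issue (recorded, but not part of the aliveness check).
type token struct{ nbf, iat, exp uint64 }

// aliveAt reports whether the token may be used at the given epoch.
func aliveAt(t token, epoch uint64) error {
	if epoch < t.nbf || epoch > t.exp {
		return errors.New("token is not alive at the current epoch")
	}
	return nil
}

func main() {
	tok := token{nbf: 10, iat: 10, exp: 20}
	for _, epoch := range []uint64{5, 15, 25} {
		fmt.Println(epoch, aliveAt(tok, epoch)) // only epoch 15 passes
	}
}
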
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index bb038a3cb..8bb89abe2 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -12,40 +11,44 @@ import (
"go.uber.org/zap"
)
-func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
+func (cp *Processor) handlePut(ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
- cp.log.Info(ctx, logs.Notification,
+ cp.log.Info(logs.Notification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
- return cp.processContainerPut(ctx, put)
+ return cp.processContainerPut(put)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
-func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
+func (cp *Processor) handleDelete(ev event.Event) {
del := ev.(containerEvent.Delete)
- cp.log.Info(ctx, logs.Notification,
+ cp.log.Info(logs.Notification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
- return cp.processContainerDelete(ctx, del)
+ return cp.processContainerDelete(del)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
+
+func (cp *Processor) handleSetEACL(_ event.Event) {
+ cp.log.Warn(logs.SkipDeprecatedNotification, zap.String("type", "set EACL"))
+}
diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go
index 1b3842eb0..dc1e919bb 100644
--- a/pkg/innerring/processors/container/handlers_test.go
+++ b/pkg/innerring/processors/container/handlers_test.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/ecdsa"
"encoding/hex"
"testing"
@@ -72,7 +71,7 @@ func TestPutEvent(t *testing.T) {
nr: nr,
}
- proc.handlePut(context.Background(), event)
+ proc.handlePut(event)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -144,7 +143,7 @@ func TestDeleteEvent(t *testing.T) {
Signature: signature,
}
- proc.handleDelete(context.Background(), ev)
+ proc.handleDelete(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -161,7 +160,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
@@ -170,11 +169,11 @@ type testNetworkState struct {
epoch uint64
}
-func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) {
+func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) {
return s.homHashDisabled, nil
}
-func (s *testNetworkState) Epoch(context.Context) (uint64, error) {
+func (s *testNetworkState) Epoch() (uint64, error) {
return s.epoch, nil
}
@@ -187,7 +186,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 {
return c.contractAddress
}
-func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
+func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) {
key := hex.EncodeToString(cid)
if cont, found := c.get[key]; found {
return cont, nil
@@ -237,6 +236,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction)
type testFrostFSIDClient struct{}
-func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
+func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
return &frostfsidclient.Subject{}, nil
}
diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go
index 8e4ab2623..d89b63e82 100644
--- a/pkg/innerring/processors/container/process_container.go
+++ b/pkg/innerring/processors/container/process_container.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"errors"
"fmt"
"strings"
@@ -37,28 +36,28 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam
// Process a new container from the user by checking the container sanity
// and sending an approval tx back to the morph.
-func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
- if !cp.alphabetState.IsAlphabet(ctx) {
- cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
+func (cp *Processor) processContainerPut(put putEvent) bool {
+ if !cp.alphabetState.IsAlphabet() {
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
return true
}
- pctx := &putContainerContext{
+ ctx := &putContainerContext{
e: put,
}
- err := cp.checkPutContainer(ctx, pctx)
+ err := cp.checkPutContainer(ctx)
if err != nil {
- cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
- zap.Error(err),
+ cp.log.Error(logs.ContainerPutContainerCheckFailed,
+ zap.String("error", err.Error()),
)
return false
}
- if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
- cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
- zap.Error(err),
+ if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
+ cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
+ zap.String("error", err.Error()),
)
return false
}
@@ -66,8 +65,8 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
return true
}
-func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error {
- binCnr := pctx.e.Container()
+func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
+ binCnr := ctx.e.Container()
var cnr containerSDK.Container
err := cnr.Unmarshal(binCnr)
@@ -75,12 +74,12 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
return fmt.Errorf("invalid binary container: %w", err)
}
- err = cp.verifySignature(ctx, signatureVerificationData{
+ err = cp.verifySignature(signatureVerificationData{
ownerContainer: cnr.Owner(),
verb: session.VerbContainerPut,
- binTokenSession: pctx.e.SessionToken(),
- binPublicKey: pctx.e.PublicKey(),
- signature: pctx.e.Signature(),
+ binTokenSession: ctx.e.SessionToken(),
+ binPublicKey: ctx.e.PublicKey(),
+ signature: ctx.e.Signature(),
signedData: binCnr,
})
if err != nil {
@@ -88,13 +87,13 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
}
// check homomorphic hashing setting
- err = checkHomomorphicHashing(ctx, cp.netState, cnr)
+ err = checkHomomorphicHashing(cp.netState, cnr)
if err != nil {
return fmt.Errorf("incorrect homomorphic hashing setting: %w", err)
}
// check native name and zone
- err = cp.checkNNS(ctx, pctx, cnr)
+ err = cp.checkNNS(ctx, cnr)
if err != nil {
return fmt.Errorf("NNS: %w", err)
}
@@ -104,24 +103,24 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
// Process a delete container operation from the user by checking container
// sanity and sending an approval tx back to the morph.
-func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
- if !cp.alphabetState.IsAlphabet(ctx) {
- cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
+func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
+ if !cp.alphabetState.IsAlphabet() {
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
return true
}
- err := cp.checkDeleteContainer(ctx, e)
+ err := cp.checkDeleteContainer(e)
if err != nil {
- cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
- zap.Error(err),
+ cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
+ zap.String("error", err.Error()),
)
return false
}
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
- cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
- zap.Error(err),
+ cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
+ zap.String("error", err.Error()),
)
return false
@@ -130,7 +129,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
return true
}
-func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error {
+func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error {
binCnr := e.ContainerID()
var idCnr cid.ID
@@ -141,12 +140,12 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
}
// receive owner of the related container
- cnr, err := cp.cnrClient.Get(ctx, binCnr)
+ cnr, err := cp.cnrClient.Get(binCnr)
if err != nil {
return fmt.Errorf("could not receive the container: %w", err)
}
- err = cp.verifySignature(ctx, signatureVerificationData{
+ err = cp.verifySignature(signatureVerificationData{
ownerContainer: cnr.Value.Owner(),
verb: session.VerbContainerDelete,
idContainerSet: true,
@@ -163,21 +162,21 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
return nil
}
-func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error {
+func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error {
// fetch domain info
- pctx.d = containerSDK.ReadDomain(cnr)
+ ctx.d = containerSDK.ReadDomain(cnr)
// if PutNamed event => check if values in container correspond to args
- if named, ok := pctx.e.(interface {
+ if named, ok := ctx.e.(interface {
Name() string
Zone() string
}); ok {
- if name := named.Name(); name != pctx.d.Name() {
- return fmt.Errorf("names differ %s/%s", name, pctx.d.Name())
+ if name := named.Name(); name != ctx.d.Name() {
+ return fmt.Errorf("names differ %s/%s", name, ctx.d.Name())
}
- if zone := named.Zone(); zone != pctx.d.Zone() {
- return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone())
+ if zone := named.Zone(); zone != ctx.d.Zone() {
+ return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone())
}
}
@@ -186,12 +185,12 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn
return fmt.Errorf("could not get container owner address: %w", err)
}
- subject, err := cp.frostFSIDClient.GetSubject(ctx, addr)
+ subject, err := cp.frostFSIDClient.GetSubject(addr)
if err != nil {
return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
}
- namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns")
+ namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns")
if !hasNamespace {
return nil
}
@@ -203,13 +202,13 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn
return nil
}
-func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error {
- netSetting, err := ns.HomomorphicHashDisabled(ctx)
+func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error {
+ netSetting, err := ns.HomomorphicHashDisabled()
if err != nil {
return fmt.Errorf("could not get setting in contract: %w", err)
}
- if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting {
+ if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting {
return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting)
}
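
Note: the last hunk is a semantic change, not just a signature revert. The "-" side flagged only "network disabled hashing, container kept it on"; the "+" side rejects any disagreement between the two settings. The difference is easiest to see as a truth table:

package main

import "fmt"

func main() {
	fmt.Println("netDisabled cnrDisabled  minus-side  plus-side")
	for _, netSetting := range []bool{false, true} {
		for _, cnrSetting := range []bool{false, true} {
			// "-" side: error only when the network disables hashing
			// but the container leaves it enabled.
			looser := netSetting && !cnrSetting
			// "+" side: error on any mismatch, including a container
			// that disables hashing while the network keeps it on.
			strict := netSetting != cnrSetting
			fmt.Println(netSetting, cnrSetting, looser, strict)
		}
	}
}
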
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index 9be93baa4..8fd9edfb8 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -1,11 +1,11 @@
package container
import (
- "context"
"errors"
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -15,17 +15,18 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
ContClient interface {
ContractAddress() util.Uint160
- Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+ Get(cid []byte) (*containercore.Container, error)
}
MorphClient interface {
@@ -33,7 +34,7 @@ type (
}
FrostFSIDClient interface {
- GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error)
+ GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error)
}
// Processor of events produced by container contract in the sidechain.
@@ -68,7 +69,7 @@ type NetworkState interface {
//
// Must return any error encountered
// which did not allow reading the value.
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
// HomomorphicHashDisabled must return boolean that
// represents homomorphic network state:
@@ -76,7 +77,7 @@ type NetworkState interface {
// * false if hashing is enabled.
//
// which did not allow reading the value.
- HomomorphicHashDisabled(ctx context.Context) (bool, error)
+ HomomorphicHashDisabled() (bool, error)
}
// New creates a container contract processor instance.
@@ -96,6 +97,8 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: FrostFSID client is not set")
}
+ p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)
@@ -118,6 +121,11 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ return nil
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil
@@ -149,6 +157,11 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
p.SetParser(containerEvent.ParseDeleteNotary)
pp = append(pp, p)
+ // set EACL
+ p.SetRequestType(containerEvent.SetEACLNotaryEvent)
+ p.SetParser(containerEvent.ParseSetEACLNotary)
+ pp = append(pp, p)
+
return pp
}
@@ -177,5 +190,10 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
h.SetHandler(cp.handleDelete)
hh = append(hh, h)
+ // set eACL
+ h.SetRequestType(containerEvent.SetEACLNotaryEvent)
+ h.SetHandler(cp.handleSetEACL)
+ hh = append(hh, h)
+
return hh
}
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
index 936de2e77..c80f9fdc5 100644
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ b/pkg/innerring/processors/frostfs/handlers.go
@@ -2,7 +2,6 @@ package frostfs
import (
"bytes"
- "context"
"encoding/hex"
"slices"
@@ -13,67 +12,67 @@ import (
"go.uber.org/zap"
)
-func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
+func (np *Processor) handleDeposit(ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
depositIDBin := bytes.Clone(deposit.ID())
slices.Reverse(depositIDBin)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "deposit"),
zap.String("id", hex.EncodeToString(depositIDBin)))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
- return np.processDeposit(ctx, deposit)
+ return np.processDeposit(deposit)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
+func (np *Processor) handleWithdraw(ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
withdrawBin := bytes.Clone(withdraw.ID())
slices.Reverse(withdrawBin)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "withdraw"),
zap.String("id", hex.EncodeToString(withdrawBin)))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
- return np.processWithdraw(ctx, withdraw)
+ return np.processWithdraw(withdraw)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
+func (np *Processor) handleCheque(ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
- return np.processCheque(ctx, cheque)
+ return np.processCheque(cheque)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
+func (np *Processor) handleConfig(ev event.Event) {
cfg := ev.(frostfsEvent.Config)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -81,11 +80,11 @@ func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
- return np.processConfig(ctx, cfg)
+ return np.processConfig(cfg)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
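
Aside: handleDeposit and handleWithdraw clone the event ID and reverse the clone before hex-encoding it for the log line, presumably because the notification delivers the hash in little-endian order. Cloning keeps the event payload itself untouched. A minimal demonstration:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"slices"
)

func main() {
	id := []byte{0xaa, 0xbb, 0xcc} // ID bytes as delivered in the event
	printable := bytes.Clone(id)   // clone so the original is not mutated
	slices.Reverse(printable)
	fmt.Println(hex.EncodeToString(printable)) // ccbbaa
	fmt.Println(hex.EncodeToString(id))        // aabbcc, still intact
}
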
diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go
index 72310f6f9..6425172bd 100644
--- a/pkg/innerring/processors/frostfs/handlers_test.go
+++ b/pkg/innerring/processors/frostfs/handlers_test.go
@@ -1,7 +1,6 @@
package frostfs
import (
- "context"
"testing"
"time"
@@ -37,7 +36,7 @@ func TestHandleDeposit(t *testing.T) {
AmountValue: 1000,
}
- proc.handleDeposit(context.Background(), ev)
+ proc.handleDeposit(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -58,7 +57,7 @@ func TestHandleDeposit(t *testing.T) {
es.epochCounter = 109
- proc.handleDeposit(context.Background(), ev)
+ proc.handleDeposit(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -99,7 +98,7 @@ func TestHandleWithdraw(t *testing.T) {
AmountValue: 1000,
}
- proc.handleWithdraw(context.Background(), ev)
+ proc.handleWithdraw(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -140,7 +139,7 @@ func TestHandleCheque(t *testing.T) {
LockValue: util.Uint160{200},
}
- proc.handleCheque(context.Background(), ev)
+ proc.handleCheque(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -177,7 +176,7 @@ func TestHandleConfig(t *testing.T) {
TxHashValue: util.Uint256{100},
}
- proc.handleConfig(context.Background(), ev)
+ proc.handleConfig(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -226,7 +225,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
@@ -242,17 +241,17 @@ type testBalaceClient struct {
burn []balance.BurnPrm
}
-func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error {
+func (c *testBalaceClient) Mint(p balance.MintPrm) error {
c.mint = append(c.mint, p)
return nil
}
-func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error {
+func (c *testBalaceClient) Lock(p balance.LockPrm) error {
c.lock = append(c.lock, p)
return nil
}
-func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error {
+func (c *testBalaceClient) Burn(p balance.BurnPrm) error {
c.burn = append(c.burn, p)
return nil
}
@@ -261,7 +260,7 @@ type testNetmapClient struct {
config []nmClient.SetConfigPrm
}
-func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
+func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error {
c.config = append(c.config, p)
return nil
}
diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go
index d10eb9660..c72aeceee 100644
--- a/pkg/innerring/processors/frostfs/process_assets.go
+++ b/pkg/innerring/processors/frostfs/process_assets.go
@@ -1,8 +1,6 @@
package frostfs
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -17,9 +15,9 @@ const (
// Process deposit event by invoking a balance contract and sending native
// gas in the sidechain.
-func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit)
+func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
return true
}
@@ -30,9 +28,9 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
prm.SetID(deposit.ID())
// send transferX to a balance contract
- err := np.balanceClient.Mint(ctx, prm)
+ err := np.balanceClient.Mint(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@@ -46,7 +44,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
- np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined,
+ np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
@@ -58,12 +56,12 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
+ np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
return false
}
if balance < np.gasBalanceThreshold {
- np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached,
+ np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
@@ -72,8 +70,8 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
- zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
+ zap.String("error", err.Error()))
return false
}
@@ -84,16 +82,16 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
}
// Process withdraw event by locking assets in the balance account.
-func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw)
+func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
return true
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err))
+ np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
return false
}
@@ -107,9 +105,9 @@ func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.
prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount()))
prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime))
- err = np.balanceClient.Lock(ctx, prm)
+ err = np.balanceClient.Lock(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
+ np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
return false
}
@@ -118,9 +116,9 @@ func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.
// Process cheque event by transferring assets from the lock account back to
// the reserve account.
-func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque)
+func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
return true
}
@@ -130,9 +128,9 @@ func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheq
prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount()))
prm.SetID(cheque.ID())
- err := np.balanceClient.Burn(ctx, prm)
+ err := np.balanceClient.Burn(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
return false
}
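
Aside: the double-mint guard in processDeposit declines a GAS emission when the same receiver was credited less than mintEmitThreshold epochs ago. A worked sketch with a plain map standing in for np.mintEmitCache (an LRU in the real processor):

package main

import "fmt"

func main() {
	const mintEmitThreshold = 3 // epochs that must pass between emissions
	cache := map[string]uint64{"receiver-1": 100}

	for _, curEpoch := range []uint64{102, 104} {
		last, ok := cache["receiver-1"]
		if ok && last+mintEmitThreshold >= curEpoch {
			// 100+3 >= 102, so the emission at epoch 102 is declined.
			fmt.Println(curEpoch, "declined: last emission at epoch", last)
			continue
		}
		// 100+3 < 104, so the emission at epoch 104 goes through.
		fmt.Println(curEpoch, "emit gas and remember the epoch")
		cache["receiver-1"] = curEpoch
	}
}
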
diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go
index dc579f6bb..2ae3e6ced 100644
--- a/pkg/innerring/processors/frostfs/process_config.go
+++ b/pkg/innerring/processors/frostfs/process_config.go
@@ -1,8 +1,6 @@
package frostfs
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -11,9 +9,9 @@ import (
// Process config event by setting configuration value from the mainchain in
// the sidechain.
-func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig)
+func (np *Processor) processConfig(config frostfsEvent.Config) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
return true
}
@@ -24,9 +22,9 @@ func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Conf
prm.SetValue(config.Value())
prm.SetHash(config.TxHash())
- err := np.netmapClient.SetConfig(ctx, prm)
+ err := np.netmapClient.SetConfig(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
+ np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go
index 9d3bf65cd..2019857ac 100644
--- a/pkg/innerring/processors/frostfs/processor.go
+++ b/pkg/innerring/processors/frostfs/processor.go
@@ -1,11 +1,11 @@
package frostfs
import (
- "context"
"errors"
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -16,6 +16,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
@@ -26,7 +27,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
// PrecisionConverter converts balance amount values.
@@ -35,13 +36,13 @@ type (
}
BalanceClient interface {
- Mint(ctx context.Context, p balance.MintPrm) error
- Lock(ctx context.Context, p balance.LockPrm) error
- Burn(ctx context.Context, p balance.BurnPrm) error
+ Mint(p balance.MintPrm) error
+ Lock(p balance.LockPrm) error
+ Burn(p balance.BurnPrm) error
}
NetmapClient interface {
- SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error
+ SetConfig(p nmClient.SetConfigPrm) error
}
MorphClient interface {
@@ -109,6 +110,8 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
+ p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
@@ -142,34 +145,70 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ var (
+ parsers = make([]event.NotificationParserInfo, 0, 6)
+
+ p event.NotificationParserInfo
+ )
+
+ p.SetScriptHash(np.frostfsContract)
+
+ // deposit event
+ p.SetType(event.TypeFromString(depositNotification))
+ p.SetParser(frostfsEvent.ParseDeposit)
+ parsers = append(parsers, p)
+
+ // withdraw event
+ p.SetType(event.TypeFromString(withdrawNotification))
+ p.SetParser(frostfsEvent.ParseWithdraw)
+ parsers = append(parsers, p)
+
+ // cheque event
+ p.SetType(event.TypeFromString(chequeNotification))
+ p.SetParser(frostfsEvent.ParseCheque)
+ parsers = append(parsers, p)
+
+ // config event
+ p.SetType(event.TypeFromString(configNotification))
+ p.SetParser(frostfsEvent.ParseConfig)
+ parsers = append(parsers, p)
+
+ return parsers
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(depositNotification),
- Parser: frostfsEvent.ParseDeposit,
- Handlers: []event.Handler{np.handleDeposit},
- },
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(withdrawNotification),
- Parser: frostfsEvent.ParseWithdraw,
- Handlers: []event.Handler{np.handleWithdraw},
- },
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(chequeNotification),
- Parser: frostfsEvent.ParseCheque,
- Handlers: []event.Handler{np.handleCheque},
- },
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(configNotification),
- Parser: frostfsEvent.ParseConfig,
- Handlers: []event.Handler{np.handleConfig},
- },
- }
+ var (
+ handlers = make([]event.NotificationHandlerInfo, 0, 6)
+
+ h event.NotificationHandlerInfo
+ )
+
+ h.SetScriptHash(np.frostfsContract)
+
+ // deposit handler
+ h.SetType(event.TypeFromString(depositNotification))
+ h.SetHandler(np.handleDeposit)
+ handlers = append(handlers, h)
+
+ // withdraw handler
+ h.SetType(event.TypeFromString(withdrawNotification))
+ h.SetHandler(np.handleWithdraw)
+ handlers = append(handlers, h)
+
+ // cheque handler
+ h.SetType(event.TypeFromString(chequeNotification))
+ h.SetHandler(np.handleCheque)
+ handlers = append(handlers, h)
+
+ // config handler
+ h.SetType(event.TypeFromString(configNotification))
+ h.SetHandler(np.handleConfig)
+ handlers = append(handlers, h)
+
+ return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
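
Note on the registration style restored here: a single h (and p) value is refilled and appended four times. That is safe because append stores a copy of the struct on each call, so later Set* calls never alias earlier slice entries. A stand-in sketch:

package main

import "fmt"

type info struct{ typ string }

func (i *info) SetType(t string) { i.typ = t }

func main() {
	var (
		out []info
		h   info
	)

	h.SetType("deposit")
	out = append(out, h) // stores a copy of h
	h.SetType("withdraw")
	out = append(out, h) // the first entry is unaffected

	fmt.Println(out[0].typ, out[1].typ) // deposit withdraw
}
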
diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go
index 7e8ab629d..fd7f539c3 100644
--- a/pkg/innerring/processors/governance/handlers.go
+++ b/pkg/innerring/processors/governance/handlers.go
@@ -1,8 +1,6 @@
package governance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -13,7 +11,7 @@ import (
"go.uber.org/zap"
)
-func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
+func (gp *Processor) HandleAlphabetSync(e event.Event) {
var (
typ string
hash util.Uint256
@@ -34,16 +32,16 @@ func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
return
}
- gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ))
+ gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
// send event to the worker pool
err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool {
- return gp.processAlphabetSync(ctx, hash)
+ return gp.processAlphabetSync(hash)
})
if err != nil {
// here the system can be moved into a controlled degradation stage
- gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained,
+ gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
zap.Int("capacity", gp.pool.Cap()))
}
}
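
For context, the part of HandleAlphabetSync elided by this hunk dispatches on the concrete event type to fill typ and hash. A sketch with local stand-in types; the real cases are the governance Sync event and rolemanagement.Designate, and the real handler also ignores Designate events whose role is not NeoFSAlphabet:

package main

import "fmt"

type (
	syncEvent      struct{ txHash [32]byte }
	designateEvent struct{ txHash [32]byte }
)

// classify picks a label and tx hash from a supported event, reporting
// false for anything it does not recognize.
func classify(e any) (string, [32]byte, bool) {
	switch ev := e.(type) {
	case syncEvent:
		return "sync", ev.txHash, true
	case designateEvent:
		return "designation", ev.txHash, true
	default:
		return "", [32]byte{}, false
	}
}

func main() {
	typ, _, ok := classify(designateEvent{})
	fmt.Println(typ, ok) // designation true
}
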
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
index 864c5da67..b73e24318 100644
--- a/pkg/innerring/processors/governance/handlers_test.go
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -1,7 +1,6 @@
package governance
import (
- "context"
"encoding/binary"
"sort"
"testing"
@@ -9,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
+ nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -38,6 +38,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
+ nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -49,6 +50,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
+ NetmapClient: nm,
},
)
@@ -58,7 +60,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
txHash: util.Uint256{100},
}
- proc.HandleAlphabetSync(context.Background(), ev)
+ proc.HandleAlphabetSync(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -71,6 +73,10 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
+ var irUpdateExp []nmClient.UpdateIRPrm
+
+ require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates")
+
var expAlphabetUpdate client.UpdateAlphabetListPrm
expAlphabetUpdate.SetHash(ev.txHash)
expAlphabetUpdate.SetList(testKeys.newInnerRingExp)
@@ -113,6 +119,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
+ nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -124,6 +131,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
+ NetmapClient: nm,
},
)
@@ -134,7 +142,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
Role: noderoles.NeoFSAlphabet,
}
- proc.HandleAlphabetSync(context.Background(), ev)
+ proc.HandleAlphabetSync(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -147,6 +155,9 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
+ var irUpdatesExp []nmClient.UpdateIRPrm
+ require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates")
+
var alpabetUpdExp client.UpdateAlphabetListPrm
alpabetUpdExp.SetList(testKeys.newInnerRingExp)
alpabetUpdExp.SetHash(ev.TxHash)
@@ -219,7 +230,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
@@ -227,7 +238,7 @@ type testVoter struct {
votes []VoteValidatorPrm
}
-func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error {
+func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error {
v.votes = append(v.votes, prm)
return nil
}
@@ -236,7 +247,7 @@ type testIRFetcher struct {
publicKeys keys.PublicKeys
}
-func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
return f.publicKeys, nil
}
@@ -251,12 +262,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) {
return c.commiteeKeys, nil
}
-func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error {
+func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error {
c.alphabetUpdates = append(c.alphabetUpdates, prm)
return nil
}
-func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error {
+func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error {
c.notaryUpdates = append(c.notaryUpdates, prm)
return nil
}
@@ -266,7 +277,7 @@ type testMainnetClient struct {
designateHash util.Uint160
}
-func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) {
+func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
return c.alphabetKeys, nil
}
@@ -278,7 +289,16 @@ type testFrostFSClient struct {
updates []frostfscontract.AlphabetUpdatePrm
}
-func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error {
+func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error {
+ c.updates = append(c.updates, p)
+ return nil
+}
+
+type testNetmapClient struct {
+ updates []nmClient.UpdateIRPrm
+}
+
+func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error {
c.updates = append(c.updates, p)
return nil
}
diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go
index 6e22abb3c..50ba58e77 100644
--- a/pkg/innerring/processors/governance/process_update.go
+++ b/pkg/innerring/processors/governance/process_update.go
@@ -1,7 +1,6 @@
package governance
import (
- "context"
"encoding/binary"
"encoding/hex"
"sort"
@@ -19,39 +18,39 @@ const (
alphabetUpdateIDPrefix = "AlphabetUpdate"
)
-func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool {
- if !gp.alphabetState.IsAlphabet(ctx) {
- gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
+func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
+ if !gp.alphabetState.IsAlphabet() {
+ gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
return true
}
- mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx)
+ mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
+ zap.String("error", err.Error()))
return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
+ zap.String("error", err.Error()))
return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
+ zap.String("error", err.Error()))
return false
}
if newAlphabet == nil {
- gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
+ gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
return true
}
- gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
+ gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)
@@ -62,22 +61,22 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
}
// 1. Vote to sidechain committee via alphabet contracts.
- err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
+ err = gp.voter.VoteForSidechainValidator(votePrm)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
+ zap.String("error", err.Error()))
}
// 2. Update NeoFSAlphabet role in the sidechain.
- gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash)
+ gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash)
// 3. Update notary role in the sidechain.
- gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash)
+ gp.updateNotaryRoleInSidechain(newAlphabet, txHash)
// 4. Update FrostFS contract in the mainnet.
- gp.updateFrostFSContractInMainnet(ctx, newAlphabet)
+ gp.updateFrostFSContractInMainnet(newAlphabet)
- gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate)
+ gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
return true
}
@@ -94,24 +93,24 @@ func prettyKeys(keys keys.PublicKeys) string {
return strings.TrimRight(sb.String(), delimiter)
}
-func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
- innerRing, err := gp.irFetcher.InnerRingKeys(ctx)
+func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+ innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
+ zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
+ zap.String("error", err.Error()))
return
}
sort.Sort(newInnerRing)
- gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList,
+ gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)
@@ -120,26 +119,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
updPrm.SetList(newInnerRing)
updPrm.SetHash(txHash)
- if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
- gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
- zap.Error(err))
+ if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
+ gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
+ zap.String("error", err.Error()))
}
}
-func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) {
updPrm := client.UpdateNotaryListPrm{}
updPrm.SetList(newAlphabet)
updPrm.SetHash(txHash)
- err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
+ err := gp.morphClient.UpdateNotaryList(updPrm)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
+ zap.String("error", err.Error()))
}
}
-func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) {
+func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) {
epoch := gp.epochState.EpochCounter()
buf := make([]byte, 8)
@@ -152,9 +151,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
prm.SetID(id)
prm.SetPubs(newAlphabet)
- err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
+ err := gp.frostfsClient.AlphabetUpdate(prm)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
+ zap.String("error", err.Error()))
}
}
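
Aside: updateFrostFSContractInMainnet derives a deterministic update ID from the alphabetUpdateIDPrefix constant plus the 8-byte epoch counter, so repeated updates within one epoch carry the same ID and can be deduplicated on the contract side. A sketch (the byte order is an assumption here):

package main

import (
	"encoding/binary"
	"fmt"
)

const alphabetUpdateIDPrefix = "AlphabetUpdate"

// updateID serializes the epoch after the prefix; same epoch, same ID.
func updateID(epoch uint64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, epoch)
	return append([]byte(alphabetUpdateIDPrefix), buf...)
}

func main() {
	fmt.Printf("%x\n", updateID(42))
	fmt.Printf("%x\n", updateID(42)) // identical to the line above
}
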
diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go
index 2d131edda..fa267eade 100644
--- a/pkg/innerring/processors/governance/processor.go
+++ b/pkg/innerring/processors/governance/processor.go
@@ -1,7 +1,6 @@
package governance
import (
- "context"
"errors"
"fmt"
@@ -26,7 +25,7 @@ const ProcessorPoolSize = 1
type (
// AlphabetState is a callback interface for innerring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
)
@@ -39,7 +38,7 @@ type VoteValidatorPrm struct {
// Voter is a callback interface for alphabet contract voting.
type Voter interface {
- VoteForSidechainValidator(context.Context, VoteValidatorPrm) error
+ VoteForSidechainValidator(VoteValidatorPrm) error
}
type (
@@ -52,11 +51,11 @@ type (
// Implementation must take into account availability of
// the notary contract.
IRFetcher interface {
- InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
+ InnerRingKeys() (keys.PublicKeys, error)
}
FrostFSClient interface {
- AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error
+ AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error
}
NetmapClient interface {
@@ -64,14 +63,14 @@ type (
}
MainnetClient interface {
- NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error)
+ NeoFSAlphabetList() (res keys.PublicKeys, err error)
GetDesignateHash() util.Uint160
}
MorphClient interface {
Committee() (res keys.PublicKeys, err error)
- UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error
- UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error
+ UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error
+ UpdateNotaryList(prm client.UpdateNotaryListPrm) error
}
// Processor of events related to governance in the network.
@@ -80,6 +79,7 @@ type (
metrics metrics.Register
pool *ants.Pool
frostfsClient FrostFSClient
+ netmapClient NetmapClient
alphabetState AlphabetState
epochState EpochState
@@ -105,6 +105,7 @@ type (
MorphClient MorphClient
MainnetClient MainnetClient
FrostFSClient FrostFSClient
+ NetmapClient NetmapClient
}
)
@@ -145,6 +146,7 @@ func New(p *Params) (*Processor, error) {
metrics: metricsRegister,
pool: pool,
frostfsClient: p.FrostFSClient,
+ netmapClient: p.NetmapClient,
alphabetState: p.AlphabetState,
epochState: p.EpochState,
voter: p.Voter,
@@ -155,16 +157,22 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ var pi event.NotificationParserInfo
+ pi.SetScriptHash(gp.designate)
+ pi.SetType(event.TypeFromString(native.DesignationEventName))
+ pi.SetParser(rolemanagement.ParseDesignate)
+ return []event.NotificationParserInfo{pi}
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: gp.designate,
- Type: event.TypeFromString(native.DesignationEventName),
- Parser: rolemanagement.ParseDesignate,
- Handlers: []event.Handler{gp.HandleAlphabetSync},
- },
- }
+ var hi event.NotificationHandlerInfo
+ hi.SetScriptHash(gp.designate)
+ hi.SetType(event.TypeFromString(native.DesignationEventName))
+ hi.SetHandler(gp.HandleAlphabetSync)
+ return []event.NotificationHandlerInfo{hi}
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go
index abd5b089a..c18611569 100644
--- a/pkg/innerring/processors/netmap/cleanup_table.go
+++ b/pkg/innerring/processors/netmap/cleanup_table.go
@@ -60,7 +60,7 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) {
}
access.binNodeInfo = binNodeInfo
- access.maintenance = nmNodes[i].Status().IsMaintenance()
+ access.maintenance = nmNodes[i].IsMaintenance()
newMap[keyString] = access
}
diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go
index 208bd5496..ae5620733 100644
--- a/pkg/innerring/processors/netmap/cleanup_table_test.go
+++ b/pkg/innerring/processors/netmap/cleanup_table_test.go
@@ -127,7 +127,7 @@ func TestCleanupTable(t *testing.T) {
t.Run("skip maintenance nodes", func(t *testing.T) {
cnt := 0
- infos[1].SetStatus(netmap.Maintenance)
+ infos[1].SetMaintenance()
key := netmap.StringifyPublicKey(infos[1])
c.update(networkMap, 5)
diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go
index 4c7199a49..c6053e281 100644
--- a/pkg/innerring/processors/netmap/handlers.go
+++ b/pkg/innerring/processors/netmap/handlers.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -12,93 +11,93 @@ import (
"go.uber.org/zap"
)
-func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
+func (np *Processor) HandleNewEpochTick(ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
- np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
+ np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
// send an event to the worker pool
- err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
if err != nil {
		// the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
+func (np *Processor) handleNewEpoch(ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
- return np.processNewEpoch(ctx, epochEvent)
+ return np.processNewEpoch(epochEvent)
})
if err != nil {
		// the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
+func (np *Processor) handleAddPeer(ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "add peer"),
)
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
- return np.processAddPeer(ctx, newPeer)
+ return np.processAddPeer(newPeer)
})
if err != nil {
		// the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
+func (np *Processor) handleUpdateState(ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
	// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
- return np.processUpdatePeer(ctx, updPeer)
+ return np.processUpdatePeer(updPeer)
})
if err != nil {
		// the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
+func (np *Processor) handleCleanupTick(ev event.Event) {
if !np.netmapSnapshot.enabled {
- np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)
+ np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
return
}
cleanup := ev.(netmapCleanupTick)
- np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))
+ np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
	// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
- return np.processNetmapCleanupTick(ctx, cleanup)
+ return np.processNetmapCleanupTick(cleanup)
})
if err != nil {
		// the system can be moved into a controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go
index 934c3790d..b34abb78c 100644
--- a/pkg/innerring/processors/netmap/handlers_test.go
+++ b/pkg/innerring/processors/netmap/handlers_test.go
@@ -1,19 +1,19 @@
package netmap
import (
- "context"
"fmt"
"testing"
"time"
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -39,7 +39,7 @@ func TestNewEpochTick(t *testing.T) {
require.NoError(t, err, "failed to create processor")
ev := timerEvent.NewEpochTick{}
- proc.HandleNewEpochTick(context.Background(), ev)
+ proc.HandleNewEpochTick(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -68,6 +68,7 @@ func TestNewEpoch(t *testing.T) {
duration: 10,
}
r := &testEpochResetter{}
+ cc := &testContainerClient{}
nc := &testNetmapClient{
epochDuration: 20,
txHeights: map[util.Uint256]uint32{
@@ -81,6 +82,7 @@ func TestNewEpoch(t *testing.T) {
p.NotaryDepositHandler = eh.Handle
p.AlphabetSyncHandler = eh.Handle
p.NetmapClient = nc
+ p.ContainerWrapper = cc
p.EpochTimer = r
p.EpochState = es
})
@@ -91,7 +93,7 @@ func TestNewEpoch(t *testing.T) {
Num: 101,
Hash: util.Uint256{101},
}
- proc.handleNewEpoch(context.Background(), ev)
+ proc.handleNewEpoch(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -101,6 +103,11 @@ func TestNewEpoch(t *testing.T) {
require.Equal(t, ev.Num, es.counter, "invalid epoch counter")
require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets")
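+	// exactly one container size estimation must have been started, targeting
+	// the epoch that has just ended (ev.Num-1) and keyed by the event's tx hash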
+ var expEstimation cntClient.StartEstimationPrm
+ expEstimation.SetEpoch(ev.Num - 1)
+ expEstimation.SetHash(ev.Hash)
+ require.EqualValues(t, []cntClient.StartEstimationPrm{expEstimation}, cc.estimations, "invalid estimations")
+
require.EqualValues(t, []event.Event{
governance.NewSyncEvent(ev.TxHash()),
ev,
@@ -131,7 +138,7 @@ func TestAddPeer(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleAddPeer(context.Background(), ev)
+ proc.handleAddPeer(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -139,14 +146,14 @@ func TestAddPeer(t *testing.T) {
require.Nil(t, nc.notaryInvokes, "invalid notary invokes")
- node.SetStatus(netmap.Online)
+ node.SetOnline()
ev = netmapEvent.AddPeer{
NodeBytes: node.Marshal(),
Request: &payload.P2PNotaryRequest{
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleAddPeer(context.Background(), ev)
+ proc.handleAddPeer(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -189,7 +196,7 @@ func TestUpdateState(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleUpdateState(context.Background(), ev)
+ proc.handleUpdateState(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -233,7 +240,7 @@ func TestCleanupTick(t *testing.T) {
txHash: util.Uint256{123},
}
- proc.handleCleanupTick(context.Background(), ev)
+ proc.handleCleanupTick(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -267,6 +274,7 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
as := &testAlphabetState{
isAlphabet: true,
}
+ cc := &testContainerClient{}
nc := &testNetmapClient{}
eh := &testEventHandler{}
@@ -280,6 +288,7 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
EpochState: es,
EpochTimer: r,
AlphabetState: as,
+ ContainerWrapper: cc,
NetmapClient: nc,
NotaryDepositHandler: eh.Handle,
AlphabetSyncHandler: eh.Handle,
@@ -294,7 +303,7 @@ type testNodeStateSettings struct {
maintAllowed bool
}
-func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {
+func (s *testNodeStateSettings) MaintenanceModeAllowed() error {
if s.maintAllowed {
return nil
}
@@ -303,7 +312,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {
type testValidator struct{}
-func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error {
+func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error {
return nil
}
@@ -341,10 +350,19 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
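+// testContainerClient is a mock of the ContainerClient interface; it records
+// the StartEstimation parameters it receives so the test can assert on them.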
+type testContainerClient struct {
+ estimations []cntClient.StartEstimationPrm
+}
+
+func (c *testContainerClient) StartEstimation(p cntClient.StartEstimationPrm) error {
+ c.estimations = append(c.estimations, p)
+ return nil
+}
+
type notaryInvoke struct {
contract util.Uint160
fee fixedn.Fixed8
@@ -365,7 +383,7 @@ type testNetmapClient struct {
invokedTxs []*transaction.Transaction
}
-func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{
contract: contract,
fee: fee,
@@ -381,7 +399,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 {
return c.contractAddress
}
-func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) {
+func (c *testNetmapClient) EpochDuration() (uint64, error) {
return c.epochDuration, nil
}
@@ -392,11 +410,11 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) {
return 0, fmt.Errorf("not found")
}
-func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
return c.netmap, nil
}
-func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error {
+func (c *testNetmapClient) NewEpoch(epoch uint64) error {
c.newEpochs = append(c.newEpochs, epoch)
return nil
}
@@ -414,6 +432,6 @@ type testEventHandler struct {
handledEvents []event.Event
}
-func (h *testEventHandler) Handle(_ context.Context, e event.Event) {
+func (h *testEventHandler) Handle(e event.Event) {
h.handledEvents = append(h.handledEvents, e)
}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
index b81dc9989..5e0558344 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
@@ -1,7 +1,6 @@
package locode
import (
- "context"
"errors"
"fmt"
@@ -30,7 +29,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record
// - Continent: R.Continent().String().
//
// UN-LOCODE attribute remains untouched.
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
attrLocode := n.LOCODE()
if attrLocode == "" {
return nil
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
index fa2dd1ac1..8ab174dfd 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
@@ -1,7 +1,6 @@
package locode_test
import (
- "context"
"errors"
"fmt"
"testing"
@@ -93,7 +92,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
t.Run("w/o locode", func(t *testing.T) {
n := nodeInfoWithSomeAttrs()
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.NoError(t, err)
})
@@ -103,7 +102,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttrValue(n, "WRONG LOCODE")
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.Error(t, err)
})
@@ -112,7 +111,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"})
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.Error(t, err)
})
@@ -120,7 +119,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, r.LOCODE)
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.NoError(t, err)
require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode"))
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
index ba5db9205..8f6667933 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
@@ -8,38 +8,38 @@ import (
// Record is a read-only interface of a single
// FrostFS LOCODE database entry.
type Record interface {
- // CountryCode must return ISO 3166-1 alpha-2
+ // Must return ISO 3166-1 alpha-2
// country code.
//
// Must not return nil.
CountryCode() *locodedb.CountryCode
- // CountryName must return English short country name
+ // Must return English short country name
// officially used by the ISO 3166
// Maintenance Agency (ISO 3166/MA).
CountryName() string
- // LocationCode must return UN/LOCODE 3-character code
+ // Must return UN/LOCODE 3-character code
// for the location (numerals 2-9 may also
// be used).
//
// Must not return nil.
LocationCode() *locodedb.LocationCode
- // LocationName must return name of the location which
+	// Must return the name of the location which
	// has been allocated a UN/LOCODE without
// diacritic sign.
LocationName() string
- // SubDivCode Must return ISO 1-3 character alphabetic
+ // Must return ISO 1-3 character alphabetic
// and/or numeric code for the administrative
// division of the country concerned.
SubDivCode() string
- // SubDivName must return subdivision name.
+ // Must return subdivision name.
SubDivName() string
- // Continent must return existing continent where is
+	// Must return the existing continent containing
// the location.
//
// Must not return nil.
@@ -49,7 +49,7 @@ type Record interface {
// DB is a read-only interface of the
// FrostFS LOCODE database.
type DB interface {
- // Get must find the record that corresponds to
+ // Must find the record that corresponds to
// LOCODE and provides the Record interface.
//
// Must return an error if Record is nil.
diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
index 0e4628ac7..126f36582 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
@@ -1,7 +1,6 @@
package maddress
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -9,7 +8,7 @@ import (
)
// VerifyAndUpdate calls network.VerifyAddress.
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
err := network.VerifyMultiAddress(*n)
if err != nil {
return fmt.Errorf("could not verify multiaddress: %w", err)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
index 03c41a451..4094e50a5 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
@@ -7,7 +7,6 @@ map candidates.
package state
import (
- "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -24,7 +23,7 @@ type NetworkSettings interface {
// no error if allowed;
// ErrMaintenanceModeDisallowed if disallowed;
// other error if there are any problems with the check.
- MaintenanceModeAllowed(ctx context.Context) error
+ MaintenanceModeAllowed() error
}
// NetMapCandidateValidator represents tool which checks state of nodes which
@@ -56,13 +55,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting
// MUST NOT be called before SetNetworkSettings.
//
// See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
-func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error {
- if node.Status().IsOnline() {
+func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error {
+ if node.IsOnline() {
return nil
}
- if node.Status().IsMaintenance() {
- return x.netSettings.MaintenanceModeAllowed(ctx)
+ if node.IsMaintenance() {
+ return x.netSettings.MaintenanceModeAllowed()
}
return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE")
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
index cbf48a710..a557628f0 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
@@ -1,7 +1,6 @@
package state_test
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -14,7 +13,7 @@ type testNetworkSettings struct {
disallowed bool
}
-func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error {
+func (x testNetworkSettings) MaintenanceModeAllowed() error {
if x.disallowed {
return state.ErrMaintenanceModeDisallowed
}
@@ -42,22 +41,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
},
{
name: "ONLINE",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) },
+ preparer: (*netmap.NodeInfo).SetOnline,
valid: true,
},
{
name: "OFFLINE",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) },
+ preparer: (*netmap.NodeInfo).SetOffline,
valid: false,
},
{
name: "MAINTENANCE/allowed",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
+ preparer: (*netmap.NodeInfo).SetMaintenance,
valid: true,
},
{
name: "MAINTENANCE/disallowed",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
+ preparer: (*netmap.NodeInfo).SetMaintenance,
valid: false,
validatorPreparer: func(v *state.NetMapCandidateValidator) {
var s testNetworkSettings
@@ -82,7 +81,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
testCase.validatorPreparer(&v)
}
- err := v.VerifyAndUpdate(context.Background(), &node)
+ err := v.VerifyAndUpdate(&node)
if testCase.valid {
require.NoError(t, err, testCase.name)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go
index 3dbe98a8d..e9b24e024 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go
@@ -1,8 +1,6 @@
package nodevalidation
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -28,9 +26,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator {
// VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators.
//
// If error appears, returns it immediately.
-func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error {
+func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error {
for _, v := range c.validators {
- if err := v.VerifyAndUpdate(ctx, ni); err != nil {
+ if err := v.VerifyAndUpdate(ni); err != nil {
return err
}
}
diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go
index 8f8cc17ff..170c39e2c 100644
--- a/pkg/innerring/processors/netmap/process_cleanup.go
+++ b/pkg/innerring/processors/netmap/process_cleanup.go
@@ -1,17 +1,15 @@
package netmap
import (
- "context"
-
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
-func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
+func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
return true
}
@@ -19,13 +17,13 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode,
+ np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))
return nil
}
- np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
+ np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
		// In notary environments we call the UpdateStateIR method instead of UpdateState.
		// It differs from UpdateState only by name, so we can invoke it in the same way.
@@ -33,7 +31,6 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
const methodUpdateStateNotary = "updateStateIR"
err = np.netmapClient.MorphNotaryInvoke(
- ctx,
np.netmapClient.ContractAddress(),
0,
uint32(ev.epoch),
@@ -42,14 +39,14 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
int64(v2netmap.Offline), key.Bytes(),
)
if err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}
return nil
})
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
+ zap.String("error", err.Error()))
return false
}
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index 7c78d24a5..4dfa3997b 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -1,23 +1,22 @@
package netmap
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"go.uber.org/zap"
)
// Process new epoch notification by setting global epoch value and resetting
// local epoch timer.
-func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
+func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
epoch := ev.EpochNumber()
- epochDuration, err := np.netmapClient.EpochDuration(ctx)
+ epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantGetEpochDuration,
+ zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)
}
@@ -26,46 +25,60 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
+ np.log.Warn(logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
- np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantResetEpochTimer,
+ zap.String("error", err.Error()))
}
// get new netmap snapshot
- networkMap, err := np.netmapClient.NetMap(ctx)
+ networkMap, err := np.netmapClient.NetMap()
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
+ zap.String("error", err.Error()))
return false
}
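+	// Ask the container contract to start size estimation for the epoch that has
+	// just ended (epoch-1); the new-epoch transaction hash presumably identifies
+	// this particular epoch transition.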
+ prm := cntClient.StartEstimationPrm{}
+
+ prm.SetEpoch(epoch - 1)
+ prm.SetHash(ev.TxHash())
+
+ if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch
+ err = np.containerWrp.StartEstimation(prm)
+ if err != nil {
+ np.log.Warn(logs.NetmapCantStartContainerSizeEstimation,
+ zap.Uint64("epoch", epoch),
+ zap.String("error", err.Error()))
+ }
+ }
+
np.netmapSnapshot.update(*networkMap, epoch)
- np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
- np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash()))
- np.handleNotaryDeposit(ctx, ev)
+ np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
+ np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash()))
+ np.handleNotaryDeposit(ev)
return true
}
// Process new epoch tick by invoking new epoch method in network map contract.
-func (np *Processor) processNewEpochTick(ctx context.Context) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
+func (np *Processor) processNewEpochTick() bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
return true
}
nextEpoch := np.epochState.EpochCounter() + 1
- np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
+ np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
- err := np.netmapClient.NewEpoch(ctx, nextEpoch)
+ err := np.netmapClient.NewEpoch(nextEpoch)
if err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index b5c727cc7..9e6e8c283 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -13,9 +12,9 @@ import (
// Process an add-peer notification by sanity-checking the new node.
-func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
+func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
return true
}
@@ -23,7 +22,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
tx := ev.NotaryRequest().MainTransaction
ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
- np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
+ np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))
@@ -34,15 +33,15 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
		// it would be nice to have the tx ID in the event structure to log it
- np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
+ np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
return false
}
// validate and update node info
- err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo)
+ err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
- np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
- zap.Error(err),
+ np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
+ zap.String("error", err.Error()),
)
return false
@@ -63,8 +62,8 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
	// But there is no guarantee that the code will be executed in the same order.
	// That is why we perform `addPeerIR` only when the node is online,
	// because within this method the contract sets the node's state to `ONLINE`.
- if updated && nodeInfo.Status().IsOnline() {
- np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
+ if updated && nodeInfo.IsOnline() {
+ np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@@ -77,7 +76,6 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
// create new notary request with the original nonce
err = np.netmapClient.MorphNotaryInvoke(
- ctx,
np.netmapClient.ContractAddress(),
0,
ev.NotaryRequest().MainTransaction.Nonce,
@@ -86,7 +84,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
nodeInfoBinary,
)
if err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
return false
}
}
@@ -95,9 +93,9 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
}
// Process update peer notification by sending approval tx to the smart contract.
-func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
+func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
return true
}
@@ -108,9 +106,9 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat
var err error
if ev.Maintenance() {
- err = np.nodeStateSettings.MaintenanceModeAllowed(ctx)
+ err = np.nodeStateSettings.MaintenanceModeAllowed()
if err != nil {
- np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
+ np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)
@@ -119,7 +117,7 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat
}
if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index 277bca1c3..e8fb8721b 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -1,12 +1,13 @@
package netmap
import (
- "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -16,6 +17,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
@@ -34,14 +36,14 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
// NodeValidator wraps basic method of checking the correctness
// of information about the node and its finalization for adding
// to the network map.
NodeValidator interface {
- // VerifyAndUpdate must verify and optionally update NodeInfo structure.
+ // Must verify and optionally update NodeInfo structure.
//
// Must return an error if NodeInfo input is invalid.
// Must return an error if it is not possible to correctly
@@ -49,20 +51,24 @@ type (
//
// If no error occurs, the parameter must point to the
// ready-made NodeInfo structure.
- VerifyAndUpdate(context.Context, *netmap.NodeInfo) error
+ VerifyAndUpdate(*netmap.NodeInfo) error
}
Client interface {
- MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
+ MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
ContractAddress() util.Uint160
- EpochDuration(ctx context.Context) (uint64, error)
+ EpochDuration() (uint64, error)
MorphTxHeight(h util.Uint256) (res uint32, err error)
- NetMap(ctx context.Context) (*netmap.NetMap, error)
- NewEpoch(ctx context.Context, epoch uint64) error
+ NetMap() (*netmap.NetMap, error)
+ NewEpoch(epoch uint64) error
MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
}
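+	// ContainerClient is the subset of the container contract client the
+	// processor needs to trigger container size estimations.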
+ ContainerClient interface {
+ StartEstimation(p cntClient.StartEstimationPrm) error
+ }
+
	// Processor of events produced by the network map contract
	// and by the new epoch ticker, which is tied to the contract.
Processor struct {
@@ -74,6 +80,7 @@ type (
alphabetState AlphabetState
netmapClient Client
+ containerWrp ContainerClient
netmapSnapshot cleanupTable
@@ -96,6 +103,7 @@ type (
AlphabetState AlphabetState
CleanupEnabled bool
CleanupThreshold uint64 // in epochs
+ ContainerWrapper ContainerClient
AlphabetSyncHandler event.Handler
NotaryDepositHandler event.Handler
@@ -125,12 +133,16 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: alphabet sync handler is not set")
case p.NotaryDepositHandler == nil:
return nil, errors.New("ir/netmap: notary deposit handler is not set")
+ case p.ContainerWrapper == nil:
+ return nil, errors.New("ir/netmap: container contract wrapper is not set")
case p.NodeValidator == nil:
return nil, errors.New("ir/netmap: node validator is not set")
case p.NodeStateSettings == nil:
return nil, errors.New("ir/netmap: node state settings is not set")
}
+ p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
+
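+	// Note: the pool is non-blocking, so SubmitEvent fails fast when the pool is
+	// saturated and the handlers log NetmapNetmapWorkerPoolDrained instead of blocking.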
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)
@@ -149,6 +161,7 @@ func New(p *Params) (*Processor, error) {
epochState: p.EpochState,
alphabetState: p.AlphabetState,
netmapClient: p.NetmapClient,
+ containerWrp: p.ContainerWrapper,
netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold),
handleAlphabetSync: p.AlphabetSyncHandler,
@@ -161,16 +174,36 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ parsers := make([]event.NotificationParserInfo, 0, 3)
+
+ var p event.NotificationParserInfo
+
+ p.SetScriptHash(np.netmapClient.ContractAddress())
+
+ // new epoch event
+ p.SetType(newEpochNotification)
+ p.SetParser(netmapEvent.ParseNewEpoch)
+ parsers = append(parsers, p)
+
+ return parsers
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: np.netmapClient.ContractAddress(),
- Type: newEpochNotification,
- Parser: netmapEvent.ParseNewEpoch,
- Handlers: []event.Handler{np.handleNewEpoch},
- },
- }
+ handlers := make([]event.NotificationHandlerInfo, 0, 3)
+
+ var i event.NotificationHandlerInfo
+
+ i.SetScriptHash(np.netmapClient.ContractAddress())
+
+ // new epoch handler
+ i.SetType(newEpochNotification)
+ i.SetHandler(np.handleNewEpoch)
+ handlers = append(handlers, i)
+
+ return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go
index 310f12248..e75fdaf40 100644
--- a/pkg/innerring/processors/netmap/wrappers.go
+++ b/pkg/innerring/processors/netmap/wrappers.go
@@ -1,8 +1,6 @@
package netmap
import (
- "context"
-
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -20,13 +18,13 @@ type netmapClientWrapper struct {
netmapClient *netmapclient.Client
}
-func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error {
- _, err := w.netmapClient.UpdatePeerState(ctx, p)
+func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error {
+ _, err := w.netmapClient.UpdatePeerState(p)
return err
}
-func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
- _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...)
+func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+ _, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...)
return err
}
@@ -34,28 +32,28 @@ func (w *netmapClientWrapper) ContractAddress() util.Uint160 {
return w.netmapClient.ContractAddress()
}
-func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) {
- return w.netmapClient.EpochDuration(ctx)
+func (w *netmapClientWrapper) EpochDuration() (uint64, error) {
+ return w.netmapClient.EpochDuration()
}
func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) {
return w.netmapClient.Morph().TxHeight(h)
}
-func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) {
- return w.netmapClient.NetMap(ctx)
+func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) {
+ return w.netmapClient.NetMap()
}
-func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
- return w.netmapClient.NewEpoch(ctx, epoch)
+func (w *netmapClientWrapper) NewEpoch(epoch uint64) error {
+ return w.netmapClient.NewEpoch(epoch)
}
func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
return w.netmapClient.Morph().IsValidScript(script, signers)
}
-func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error {
- return w.netmapClient.AddPeer(ctx, p)
+func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error {
+ return w.netmapClient.AddPeer(p)
}
func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go
index 0ef771359..d3071faad 100644
--- a/pkg/innerring/state.go
+++ b/pkg/innerring/state.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"fmt"
"sort"
@@ -48,21 +47,21 @@ func (s *Server) SetEpochDuration(val uint64) {
}
// IsActive is a getter for a global active flag state.
-func (s *Server) IsActive(ctx context.Context) bool {
- return s.InnerRingIndex(ctx) >= 0
+func (s *Server) IsActive() bool {
+ return s.InnerRingIndex() >= 0
}
// IsAlphabet is a getter for a global alphabet flag state.
-func (s *Server) IsAlphabet(ctx context.Context) bool {
- return s.AlphabetIndex(ctx) >= 0
+func (s *Server) IsAlphabet() bool {
+ return s.AlphabetIndex() >= 0
}
// InnerRingIndex is a getter for the global index of the node in the inner ring list. A negative
// index means that the node is not in the inner ring list.
-func (s *Server) InnerRingIndex(ctx context.Context) int {
- index, err := s.statusIndex.InnerRingIndex(ctx)
+func (s *Server) InnerRingIndex() int {
+ index, err := s.statusIndex.InnerRingIndex()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
+ s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}
@@ -71,10 +70,10 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
// InnerRingSize is a getter for the global size of the inner ring list. This value
// is paired with the inner ring index.
-func (s *Server) InnerRingSize(ctx context.Context) int {
- size, err := s.statusIndex.InnerRingSize(ctx)
+func (s *Server) InnerRingSize() int {
+ size, err := s.statusIndex.InnerRingSize()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
+ s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}
@@ -83,28 +82,28 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
// AlphabetIndex is a getter for the global index of the node in the alphabet list.
// A negative index means that the node is not in the alphabet list.
-func (s *Server) AlphabetIndex(ctx context.Context) int {
- index, err := s.statusIndex.AlphabetIndex(ctx)
+func (s *Server) AlphabetIndex() int {
+ index, err := s.statusIndex.AlphabetIndex()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
+ s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}
return int(index)
}
-func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
+func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error {
validators := prm.Validators
- index := s.InnerRingIndex(ctx)
+ index := s.InnerRingIndex()
if s.contracts.alphabet.indexOutOfRange(index) {
- s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
+ s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
return nil
}
if len(validators) == 0 {
- s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
+ s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
return nil
}
@@ -127,12 +126,12 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
}
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
- _, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
+ _, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
- s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
+ s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
})
@@ -141,9 +140,9 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
// VoteForSidechainValidator calls vote method on alphabet contracts with
// the provided list of keys.
-func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
+func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error {
sort.Sort(prm.Validators)
- return s.voteForSidechainValidator(ctx, prm)
+ return s.voteForSidechainValidator(prm)
}
// ResetEpochTimer resets the block timer that produces events to update epoch
@@ -154,17 +153,17 @@ func (s *Server) ResetEpochTimer(h uint32) error {
return s.epochTimer.Reset()
}
-func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) {
+func (s *Server) setHealthStatus(hs control.HealthStatus) {
s.healthStatus.Store(int32(hs))
- s.notifySystemd(ctx, hs)
+ s.notifySystemd(hs)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(hs))
}
}
-func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
+func (s *Server) CompareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
- s.notifySystemd(ctx, newSt)
+ s.notifySystemd(newSt)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(newSt))
}
@@ -187,7 +186,7 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err
return persistStorage, nil
}
-func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
+func (s *Server) notifySystemd(st control.HealthStatus) {
if !s.sdNotify {
return
}
@@ -203,6 +202,6 @@ func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
- s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
+ s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go
index f60ca87c4..fe09f8f2d 100644
--- a/pkg/innerring/state_test.go
+++ b/pkg/innerring/state_test.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"testing"
"time"
@@ -43,12 +42,12 @@ func TestServerState(t *testing.T) {
require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration")
var healthStatus control.HealthStatus = control.HealthStatus_READY
- srv.setHealthStatus(context.Background(), healthStatus)
+ srv.setHealthStatus(healthStatus)
require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")
- require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
- require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
- require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
- require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index")
- require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
+ require.True(t, srv.IsActive(), "invalid IsActive result")
+ require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result")
+ require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index")
+	require.Equal(t, 1, srv.InnerRingSize(), "invalid IR size")
+ require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index")
}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go
index a6c40f9fa..c787f9d5e 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go
@@ -58,7 +58,7 @@ func defaultCfg(c *cfg) {
},
fullSizeLimit: 1 << 30, // 1GB
objSizeLimit: 1 << 20, // 1MB
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
metrics: &NoopMetrics{},
}
}
@@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
// WithLogger returns an option to specify Blobovnicza's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
}
}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
index 95fdd844b..caee770e8 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
@@ -69,10 +69,10 @@ func TestBlobovnicza(t *testing.T) {
defer os.Remove(p)
// open Blobovnicza
- require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Open())
// initialize Blobovnicza
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Init())
// try to read non-existent address
testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
@@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) {
return err == nil
}, nil)
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
}
diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
index 4947512cc..aeaa4e1d5 100644
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ b/pkg/local_object_storage/blobovnicza/control.go
@@ -1,7 +1,6 @@
package blobovnicza
import (
- "context"
"errors"
"fmt"
"path/filepath"
@@ -16,7 +15,7 @@ import (
//
// If the database file does not exist, it will be created automatically.
// If blobovnicza is already open, does nothing.
-func (b *Blobovnicza) Open(ctx context.Context) error {
+func (b *Blobovnicza) Open() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -24,7 +23,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error {
return nil
}
- b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB,
+ b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@@ -38,7 +37,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error {
}
}
- b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB,
+ b.log.Debug(logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
@@ -56,7 +55,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error {
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error will be returned.
-func (b *Blobovnicza) Init(ctx context.Context) error {
+func (b *Blobovnicza) Init() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -64,7 +63,7 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
return errors.New("blobovnicza is not open")
}
- b.log.Debug(ctx, logs.BlobovniczaInitializing,
+ b.log.Debug(logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
@@ -72,7 +71,7 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
- b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
+ b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}
@@ -82,7 +81,7 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
// create size range bucket
rangeStr := stringifyBounds(lower, upper)
- b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange,
+ b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))
_, err := tx.CreateBucketIfNotExists(key)
@@ -99,14 +98,14 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
}
}
- return b.initializeCounters(ctx)
+ return b.initializeCounters()
}
func (b *Blobovnicza) ObjectsCount() uint64 {
return b.itemsCount.Load()
}
-func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
+func (b *Blobovnicza) initializeCounters() error {
var size uint64
var items uint64
var sizeExists bool
@@ -129,20 +128,20 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
})
})
if err != nil {
- return fmt.Errorf("determine DB size: %w", err)
+ return fmt.Errorf("can't determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
- b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
+ b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
if err := saveDataSize(tx, size); err != nil {
return err
}
return saveItemsCount(tx, items)
}); err != nil {
- b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
- return fmt.Errorf("save blobovnicza's size and items count: %w", err)
+ b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
+ return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
}
- b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
+ b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}
b.dataSize.Store(size)
@@ -155,7 +154,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
-func (b *Blobovnicza) Close(ctx context.Context) error {
+func (b *Blobovnicza) Close() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -163,7 +162,7 @@ func (b *Blobovnicza) Close(ctx context.Context) error {
return nil
}
- b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB,
+ b.log.Debug(logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go
index 8f24b5675..5d6787897 100644
--- a/pkg/local_object_storage/blobovnicza/delete.go
+++ b/pkg/local_object_storage/blobovnicza/delete.go
@@ -6,6 +6,7 @@ import (
"syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -90,9 +91,10 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
}
if err == nil && found {
- b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
+ b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
b.itemDeleted(recordSize)
}
diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go
index 5a382c159..c464abc87 100644
--- a/pkg/local_object_storage/blobovnicza/get_test.go
+++ b/pkg/local_object_storage/blobovnicza/get_test.go
@@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
var blz *Blobovnicza
- defer func() { require.NoError(t, blz.Close(context.Background())) }()
+ defer func() { require.NoError(t, blz.Close()) }()
fnInit := func(szLimit uint64) {
if blz != nil {
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
}
blz = New(
@@ -26,8 +26,8 @@ func TestBlobovnicza_Get(t *testing.T) {
WithObjectSizeLimit(szLimit),
)
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
}
// initial distribution: [0:32K] (32K:64K]
diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go
index cd33b263c..01e5529da 100644
--- a/pkg/local_object_storage/blobovnicza/iterate.go
+++ b/pkg/local_object_storage/blobovnicza/iterate.go
@@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
if prm.ignoreErrors {
return nil
}
- return fmt.Errorf("decode address key: %w", err)
+ return fmt.Errorf("could not decode address key: %w", err)
}
}
diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go
index 717274781..5db1e4165 100644
--- a/pkg/local_object_storage/blobovnicza/iterate_test.go
+++ b/pkg/local_object_storage/blobovnicza/iterate_test.go
@@ -15,8 +15,8 @@ import (
func TestBlobovniczaIterate(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
b := New(WithPath(filename))
- require.NoError(t, b.Open(context.Background()))
- require.NoError(t, b.Init(context.Background()))
+ require.NoError(t, b.Open())
+ require.NoError(t, b.Init())
data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}}
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
index dbaa7387a..603c6abe3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
@@ -1,7 +1,6 @@
package blobovniczatree
import (
- "context"
"path/filepath"
"sync"
@@ -18,8 +17,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}
-func (db *activeDB) Close(ctx context.Context) {
- db.shDB.Close(ctx)
+func (db *activeDB) Close() {
+ db.shDB.Close()
}
func (db *activeDB) SystemPath() string {
@@ -54,8 +53,8 @@ func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager
// GetOpenedActiveDBForLevel returns active DB for level.
// DB must be closed after use.
-func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) {
- activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath)
+func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) {
+ activeDB, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
@@ -63,7 +62,7 @@ func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath
return activeDB, nil
}
- return m.updateAndGetActive(ctx, lvlPath)
+ return m.updateAndGetActive(lvlPath)
}
func (m *activeDBManager) Open() {
@@ -73,18 +72,18 @@ func (m *activeDBManager) Open() {
m.closed = false
}
-func (m *activeDBManager) Close(ctx context.Context) {
+func (m *activeDBManager) Close() {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
for _, db := range m.levelToActiveDB {
- db.Close(ctx)
+ db.Close()
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
}
-func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) {
+func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
@@ -97,13 +96,13 @@ func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath stri
return nil, nil
}
- blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close()
+ blz, err := db.Open() // open db for usage, will be closed on activeDB.Close()
if err != nil {
return nil, err
}
if blz.IsFull() {
- db.Close(ctx)
+ db.Close()
return nil, nil
}
@@ -113,11 +112,11 @@ func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath stri
}, nil
}
-func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) {
+func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) {
m.levelLock.Lock(lvlPath)
defer m.levelLock.Unlock(lvlPath)
- current, err := m.getCurrentActiveIfOk(ctx, lvlPath)
+ current, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
@@ -125,7 +124,7 @@ func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string
return current, nil
}
- nextShDB, err := m.getNextSharedDB(ctx, lvlPath)
+ nextShDB, err := m.getNextSharedDB(lvlPath)
if err != nil {
return nil, err
}
@@ -134,7 +133,7 @@ func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string
return nil, nil
}
- blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage
+ blz, err := nextShDB.Open() // open db for client, client must call Close() after usage
if err != nil {
return nil, err
}
@@ -144,7 +143,7 @@ func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string
}, nil
}
-func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) {
+func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
var nextActiveDBIdx uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
@@ -161,17 +160,17 @@ func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (
path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
next := m.dbManager.GetByPath(path)
- _, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
+ _, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
if err != nil {
return nil, err
}
previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
- next.Close(ctx) // manager is closed, so don't hold active DB open
+ next.Close() // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
- previous.Close(ctx)
+ previous.Close()
}
return next, nil
}
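Note: `getCurrentActiveIfOk` followed by `updateAndGetActive` is a double-checked lookup: a cheap read-locked probe first, then a per-level lock and a re-check before creating the next DB. A self-contained sketch of that pattern; the real manager uses a per-path lock, which this collapses into one RWMutex:

```go
package main

import (
	"fmt"
	"sync"
)

type entry struct{ path string }

type registry struct {
	mu sync.RWMutex
	m  map[string]*entry
}

func (r *registry) get(path string) *entry {
	r.mu.RLock()
	e := r.m[path]
	r.mu.RUnlock()
	if e != nil {
		return e
	}

	r.mu.Lock()
	defer r.mu.Unlock()
	// Re-check: another goroutine may have created the entry while we
	// were upgrading from the read lock to the write lock.
	if e := r.m[path]; e != nil {
		return e
	}
	e = &entry{path: path}
	r.m[path] = e
	return e
}

func main() {
	r := &registry{m: make(map[string]*entry)}
	fmt.Println(r.get("0/1").path)
}
```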
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
index 3e8b9f07b..c909113c7 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
@@ -158,16 +158,16 @@ func (b *Blobovniczas) Path() string {
}
// SetCompressor implements common.Storage.
-func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) {
+func (b *Blobovniczas) SetCompressor(cc *compression.Config) {
b.compression = cc
}
-func (b *Blobovniczas) Compressor() *compression.Compressor {
+func (b *Blobovniczas) Compressor() *compression.Config {
return b.compression
}
// SetReportErrorFunc implements common.Storage.
-func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) {
+func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) {
b.reportError = f
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
index 04ff5120c..5c103c1bb 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
@@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int,
ch := cache.NewCache[string, *sharedDB]().
WithTTL(ttl).WithLRU().WithMaxKeys(size).
WithOnEvicted(func(_ string, db *sharedDB) {
- db.Close(parentCtx)
+ db.Close()
})
ctx, cancel := context.WithCancel(parentCtx)
res := &dbCache{
@@ -81,12 +81,12 @@ func (c *dbCache) Close() {
c.closed = true
}
-func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB {
+func (c *dbCache) GetOrCreate(path string) *sharedDB {
value := c.getExisted(path)
if value != nil {
return value
}
- return c.create(ctx, path)
+ return c.create(path)
}
func (c *dbCache) EvictAndMarkNonCached(path string) {
@@ -122,7 +122,7 @@ func (c *dbCache) getExisted(path string) *sharedDB {
return nil
}
-func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
+func (c *dbCache) create(path string) *sharedDB {
c.pathLock.Lock(path)
defer c.pathLock.Unlock(path)
@@ -133,12 +133,12 @@ func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
value = c.dbManager.GetByPath(path)
- _, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed
+ _, err := value.Open() // open db to hold reference, closed by evictedDB.Close() or if cache closed
if err != nil {
return value
}
if added := c.put(path, value); !added {
- value.Close(ctx)
+ value.Close()
}
return value
}
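Note: the eviction hook is what lets the cache own a DB reference — the reference taken by `value.Open()` at insert time is released by `db.Close()` when the entry falls out. A minimal sketch of the same idea with hashicorp/golang-lru; the tree uses a different cache package, so this only mirrors the shape:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

type db struct{ path string }

func (d *db) Close() { fmt.Println("closed", d.path) }

func main() {
	c, err := lru.NewWithEvict[string, *db](2, func(_ string, d *db) {
		d.Close() // drop the reference held by the cache
	})
	if err != nil {
		panic(err)
	}
	c.Add("a", &db{"a"})
	c.Add("b", &db{"b"})
	c.Add("c", &db{"c"}) // evicts "a", printing "closed a"
}
```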
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
index f87f4a144..cc8a52d03 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
@@ -19,8 +19,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
st := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(10),
WithBlobovniczaShallowDepth(1),
@@ -28,7 +27,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
require.NoError(t, st.Open(mode.ComponentReadWrite))
require.NoError(t, st.Init())
defer func() {
- require.NoError(t, st.Close(context.Background()))
+ require.NoError(t, st.Close())
}()
objGen := &testutil.SeqObjGenerator{ObjSize: 1}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index a6c1ce368..681cf876c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -24,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
- b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)
+ b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
if b.readOnly {
- b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
+ b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}
@@ -41,34 +41,35 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
}
eg, egCtx := errgroup.WithContext(ctx)
- if b.blzInitWorkerCount > 0 {
- eg.SetLimit(b.blzInitWorkerCount + 1)
- }
- eg.Go(func() error {
- return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
- eg.Go(func() error {
- p = strings.TrimSuffix(p, rebuildSuffix)
- shBlz := b.getBlobovniczaWithoutCaching(p)
- blz, err := shBlz.Open(egCtx)
- if err != nil {
- return err
- }
- defer shBlz.Close(egCtx)
+ eg.SetLimit(b.blzInitWorkerCount)
+ err = b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
+ eg.Go(func() error {
+ p = strings.TrimSuffix(p, rebuildSuffix)
+ shBlz := b.getBlobovniczaWithoutCaching(p)
+ blz, err := shBlz.Open()
+ if err != nil {
+ return err
+ }
+ defer shBlz.Close()
- moveInfo, err := blz.ListMoveInfo(egCtx)
- if err != nil {
- return err
- }
- for _, move := range moveInfo {
- b.deleteProtectedObjects.Add(move.Address)
- }
+ moveInfo, err := blz.ListMoveInfo(egCtx)
+ if err != nil {
+ return err
+ }
+ for _, move := range moveInfo {
+ b.deleteProtectedObjects.Add(move.Address)
+ }
- b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
- return nil
- })
- return false, nil
+ b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
+ return nil
})
+ return false, nil
})
+ if err != nil {
+ _ = eg.Wait()
+ return err
+ }
+
return eg.Wait()
}
@@ -79,9 +80,9 @@ func (b *Blobovniczas) openManagers() {
}
// Close implements common.Storage.
-func (b *Blobovniczas) Close(ctx context.Context) error {
+func (b *Blobovniczas) Close() error {
b.dbCache.Close() // order important
- b.activeDBManager.Close(ctx)
+ b.activeDBManager.Close()
b.commondbManager.Close()
return nil
@@ -90,8 +91,8 @@ func (b *Blobovniczas) Close(ctx context.Context) error {
// returns blobovnicza with path p
//
// If blobovnicza is already cached, instance from cache is returned w/o changes.
-func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB {
- return b.dbCache.GetOrCreate(ctx, p)
+func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB {
+ return b.dbCache.GetOrCreate(p)
}
func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {
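Note: the reverted init loop iterates on the caller's goroutine and bounds only the workers with `eg.SetLimit`; the removed variant used `blzInitWorkerCount + 1` because the outer iteration itself ran inside `eg.Go` and occupied a slot. A minimal errgroup sketch of the post-revert shape:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	eg.SetLimit(4) // at most 4 init workers in flight; Go blocks when full

	for i := 0; i < 10; i++ {
		i := i
		eg.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err() // another worker already failed
			default:
			}
			fmt.Println("init db", i)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("init failed:", err)
	}
}
```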
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
index 7db1891f9..f0a32ded1 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
@@ -2,9 +2,6 @@ package blobovniczatree
import (
"context"
- "os"
- "path"
- "strconv"
"testing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -54,7 +51,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj35, gRes.Object)
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
// change depth and width
blz = NewBlobovniczaTree(
@@ -92,7 +89,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
})
require.NoError(t, err)
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
// change depth and width back
blz = NewBlobovniczaTree(
@@ -130,36 +127,5 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj52, gRes.Object)
- require.NoError(t, blz.Close(context.Background()))
-}
-
-func TestInitBlobovniczasInitErrorType(t *testing.T) {
- t.Parallel()
-
- rootDir := t.TempDir()
-
- for idx := 0; idx < 10; idx++ {
- f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"))
- require.NoError(t, err)
- _, err = f.Write([]byte("invalid db"))
- require.NoError(t, err)
- require.NoError(t, f.Close())
-
- f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix))
- require.NoError(t, err)
- require.NoError(t, f.Close())
- }
-
- blz := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaShallowDepth(1),
- WithBlobovniczaShallowWidth(1),
- WithRootPath(rootDir),
- )
-
- require.NoError(t, blz.Open(mode.ComponentReadWrite))
- err := blz.Init()
- require.Contains(t, err.Error(), "open blobovnicza")
- require.Contains(t, err.Error(), "invalid database")
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go
index b83849c77..cf91637d7 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/count.go
@@ -16,17 +16,17 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) {
b.metrics.ObjectsCount(time.Since(startedAt), success)
}()
- ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
defer span.End()
var result uint64
err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
shDB := b.getBlobovniczaWithoutCaching(p)
- blz, err := shDB.Open(ctx)
+ blz, err := shDB.Open()
if err != nil {
return true, err
}
- defer shDB.Close(ctx)
+ defer shDB.Close()
result += blz.ObjectsCount()
return false, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
index d096791c3..298de3ad6 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -18,10 +19,7 @@ import (
"go.uber.org/zap"
)
-var (
- errObjectIsDeleteProtected = errors.New("object is delete protected")
- deleteRes = common.DeleteRes{}
-)
+var errObjectIsDeleteProtected = errors.New("object is delete protected")
// Delete deletes object from blobovnicza tree.
//
@@ -45,17 +43,17 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
defer span.End()
if b.readOnly {
- return deleteRes, common.ErrReadOnly
+ return common.DeleteRes{}, common.ErrReadOnly
}
if b.rebuildGuard.TryRLock() {
defer b.rebuildGuard.RUnlock()
} else {
- return deleteRes, errRebuildInProgress
+ return common.DeleteRes{}, errRebuildInProgress
}
if b.deleteProtectedObjects.Contains(prm.Address) {
- return deleteRes, errObjectIsDeleteProtected
+ return common.DeleteRes{}, errObjectIsDeleteProtected
}
var bPrm blobovnicza.DeletePrm
@@ -63,12 +61,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(id.Path())
+ blz, err := shBlz.Open()
if err != nil {
return res, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true
@@ -82,9 +80,10 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
- zap.Error(err),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
}
@@ -99,7 +98,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if err == nil && !objectFound {
// not found in any blobovnicza
- return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
success = err == nil
@@ -110,12 +109,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
//
// returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
- shBlz := b.getBlobovnicza(ctx, blzPath)
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(blzPath)
+ blz, err := shBlz.Open()
if err != nil {
- return deleteRes, err
+ return common.DeleteRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
return b.deleteObject(ctx, blz, prm)
}
@@ -123,5 +122,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
// removes object from blobovnicza and returns common.DeleteRes.
func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(ctx, prm)
- return deleteRes, err
+ return common.DeleteRes{}, err
}
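Note: replacing the package-level `deleteRes` with `common.DeleteRes{}` literals changes nothing observable — both spell the same zero value, and zero-sized struct values cost nothing to construct. A tiny illustration with a placeholder type:

```go
package main

import "fmt"

type DeleteRes struct{}

var deleteRes = DeleteRes{}

func a() DeleteRes { return deleteRes }   // shared zero value
func b() DeleteRes { return DeleteRes{} } // fresh literal, identical result

func main() { fmt.Println(a() == b()) } // true
```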
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index 0c5e48821..a64b2bbb1 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"go.opentelemetry.io/otel/attribute"
@@ -36,12 +37,12 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(id.Path())
+ blz, err := shBlz.Open()
if err != nil {
return common.ExistsRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err
@@ -54,9 +55,10 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
_, err := b.getObjectFromLevel(ctx, gPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index df2b4ffe5..d6ffd8bce 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -19,8 +19,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -28,7 +27,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
WithBlobovniczaSize(1<<20))
require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
- defer func() { require.NoError(t, b.Close(context.Background())) }()
+ defer func() { require.NoError(t, b.Close()) }()
obj := blobstortest.NewObject(1024)
addr := object.AddressOf(obj)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
index 9244d765c..d390ecf1d 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
@@ -15,8 +15,7 @@ func TestGeneric(t *testing.T) {
helper := func(t *testing.T, dir string) common.Storage {
return NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -44,8 +43,7 @@ func TestControl(t *testing.T) {
newTree := func(t *testing.T) common.Storage {
return NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index e5c83e5f2..08cacda8a 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -47,12 +48,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(id.Path())
+ blz, err := shBlz.Open()
if err != nil {
return res, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
res, err = b.getObject(ctx, blz, bPrm)
if err == nil {
@@ -66,9 +67,10 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
res, err = b.getObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.Error(err),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
}
@@ -93,12 +95,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
// open blobovnicza (cached inside)
- shBlz := b.getBlobovnicza(ctx, blzPath)
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(blzPath)
+ blz, err := shBlz.Open()
if err != nil {
return common.GetRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
return b.getObject(ctx, blz, prm)
}
@@ -113,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
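Note: the read path here is fetch, decompress, unmarshal, with each failure wrapped via `%w` so callers can inspect the cause through the wrap. A sketch of that wrapping discipline using gzip as a stand-in codec; the real code goes through the tree's compression config and the SDK object type:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func decode(raw []byte) ([]byte, error) {
	zr, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		return nil, fmt.Errorf("could not decompress object data: %w", err)
	}
	defer zr.Close()
	data, err := io.ReadAll(zr)
	if err != nil {
		return nil, fmt.Errorf("could not decompress object data: %w", err)
	}
	return data, nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write([]byte("payload"))
	_ = zw.Close()

	data, err := decode(buf.Bytes())
	fmt.Println(string(data), err)
}
```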
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index 27d13f4f3..d237ae439 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -46,12 +47,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(id.Path())
+ blz, err := shBlz.Open()
if err != nil {
return common.GetRangeRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
res, err := b.getObjectRange(ctx, blz, prm)
if err == nil {
@@ -68,9 +69,10 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if outOfBounds {
return true, err
@@ -101,12 +103,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
// open blobovnicza (cached inside)
- shBlz := b.getBlobovnicza(ctx, blzPath)
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(blzPath)
+ blz, err := shBlz.Open()
if err != nil {
return common.GetRangeRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
return b.getObjectRange(ctx, blz, prm)
}
@@ -128,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
from := prm.Range.GetOffset()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index ceb8fb7e3..f6acb46aa 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -42,14 +42,14 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
data, err := b.compression.Decompress(elem.ObjectData())
if err != nil {
if prm.IgnoreErrors {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", elem.Address()),
- zap.Error(err),
+ zap.String("err", err.Error()),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return nil
}
- return fmt.Errorf("decompress object data: %w", err)
+ return fmt.Errorf("could not decompress object data: %w", err)
}
if prm.Handler != nil {
@@ -72,19 +72,19 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
// iterator over all Blobovniczas in unsorted order. Break on f's error return.
func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
- shBlz := b.getBlobovnicza(ctx, p)
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(p)
+ blz, err := shBlz.Open()
if err != nil {
if ignoreErrors {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
- zap.Error(err),
+ b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.String("err", err.Error()),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return false, nil
}
- return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
+ return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
err = f(p, blz)
@@ -249,12 +249,6 @@ func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Addres
}
func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) {
- select {
- case <-ctx.Done():
- return false, ctx.Err()
- default:
- }
-
sysPath := filepath.Join(b.rootPath, path)
entries, err := os.ReadDir(sysPath)
if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
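Note: the deleted `select` is the standard cooperative-cancellation probe; after this revert the recursive walk no longer checks the context between directory levels, so cancellation is only observed by whatever the callback does. The removed pattern, shown on a plain `filepath.WalkDir` for reference:

```go
package main

import (
	"context"
	"fmt"
	"io/fs"
	"path/filepath"
)

func walk(ctx context.Context, root string) error {
	return filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err() // stop descending promptly on cancellation
		default:
		}
		fmt.Println(p)
		return nil
	})
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_ = walk(ctx, ".")
}
```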
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
index 6438f715b..4fdde15a9 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
@@ -1,7 +1,6 @@
package blobovniczatree
import (
- "context"
"errors"
"fmt"
"os"
@@ -49,7 +48,7 @@ func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
}
}
-func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
+func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
if b.closedFlag.Load() {
return nil, errClosed
}
@@ -68,11 +67,11 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
blobovnicza.WithMetrics(b.metrics),
)...)
- if err := blz.Open(ctx); err != nil {
- return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
+ if err := blz.Open(); err != nil {
+ return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
}
- if err := blz.Init(ctx); err != nil {
- return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
+ if err := blz.Init(); err != nil {
+ return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
}
b.refCount++
@@ -82,22 +81,22 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
return blz, nil
}
-func (b *sharedDB) Close(ctx context.Context) {
+func (b *sharedDB) Close() {
b.cond.L.Lock()
defer b.cond.L.Unlock()
if b.refCount == 0 {
- b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
+ b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.cond.Broadcast()
return
}
if b.refCount == 1 {
b.refCount = 0
- if err := b.blcza.Close(ctx); err != nil {
- b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+ if err := b.blcza.Close(); err != nil {
+ b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
b.blcza = nil
@@ -111,7 +110,7 @@ func (b *sharedDB) Close(ctx context.Context) {
}
}
-func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
+func (b *sharedDB) CloseAndRemoveFile() error {
b.cond.L.Lock()
if b.refCount > 1 {
b.cond.Wait()
@@ -122,12 +121,12 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
return errClosingClosedBlobovnicza
}
- if err := b.blcza.Close(ctx); err != nil {
- b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+ if err := b.blcza.Close(); err != nil {
+ b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
- return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
+ return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
}
b.refCount = 0
@@ -141,8 +140,8 @@ func (b *sharedDB) SystemPath() string {
return b.path
}
-// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
-type levelDBManager struct {
+// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
+type levelDbManager struct {
dbMtx *sync.RWMutex
databases map[uint64]*sharedDB
@@ -157,8 +156,8 @@ type levelDBManager struct {
func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string,
readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
-) *levelDBManager {
- result := &levelDBManager{
+) *levelDbManager {
+ result := &levelDbManager{
databases: make(map[uint64]*sharedDB),
dbMtx: &sync.RWMutex{},
@@ -173,7 +172,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st
return result
}
-func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
+func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
res := m.getDBIfExists(idx)
if res != nil {
return res
@@ -181,14 +180,14 @@ func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
return m.getOrCreateDB(idx)
}
-func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB {
+func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB {
m.dbMtx.RLock()
defer m.dbMtx.RUnlock()
return m.databases[idx]
}
-func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
+func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
m.dbMtx.Lock()
defer m.dbMtx.Unlock()
@@ -202,7 +201,7 @@ func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
return db
}
-func (m *levelDBManager) hasAnyDB() bool {
+func (m *levelDbManager) hasAnyDB() bool {
m.dbMtx.RLock()
defer m.dbMtx.RUnlock()
@@ -213,7 +212,7 @@ func (m *levelDBManager) hasAnyDB() bool {
//
// The blobovnicza opens at the first request, closes after the last request.
type dbManager struct {
- levelToManager map[string]*levelDBManager
+ levelToManager map[string]*levelDbManager
levelToManagerGuard *sync.RWMutex
closedFlag *atomic.Bool
dbCounter *openDBCounter
@@ -231,7 +230,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool,
options: options,
readOnly: readOnly,
metrics: metrics,
- levelToManager: make(map[string]*levelDBManager),
+ levelToManager: make(map[string]*levelDbManager),
levelToManagerGuard: &sync.RWMutex{},
log: log,
closedFlag: &atomic.Bool{},
@@ -266,7 +265,7 @@ func (m *dbManager) Close() {
m.dbCounter.WaitUntilAllClosed()
}
-func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
+func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
result := m.getLevelManagerIfExists(lvlPath)
if result != nil {
return result
@@ -274,14 +273,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
return m.getOrCreateLevelManager(lvlPath)
}
-func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager {
+func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
m.levelToManagerGuard.RLock()
defer m.levelToManagerGuard.RUnlock()
return m.levelToManager[lvlPath]
}
-func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
+func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
m.levelToManagerGuard.Lock()
defer m.levelToManagerGuard.Unlock()
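Note: `sharedDB` keeps a single bolt handle alive across users via a reference count — the first `Open` really opens, the last `Close` really closes, and `CloseAndRemoveFile` waits on the condition variable until it is the sole holder. A minimal refcount sketch (condition-variable wait omitted, names illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type shared struct {
	mu       sync.Mutex
	refCount int
	handle   *string // stands in for *blobovnicza.Blobovnicza
}

func (s *shared) Open() *string {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.handle == nil {
		h := "opened bolt handle"
		s.handle = &h // first user really opens
	}
	s.refCount++
	return s.handle
}

func (s *shared) Close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.refCount == 0 {
		fmt.Println("close of already closed handle")
		return
	}
	s.refCount--
	if s.refCount == 0 {
		s.handle = nil // last user really closes
		fmt.Println("handle closed")
	}
}

func main() {
	s := &shared{}
	_ = s.Open()
	_ = s.Open()
	s.Close()
	s.Close() // prints "handle closed"
}
```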
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
index 5f268b0f2..008be9543 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
@@ -1,7 +1,6 @@
package blobovniczatree
import (
- "context"
"io/fs"
"time"
@@ -19,9 +18,9 @@ type cfg struct {
openedCacheSize int
blzShallowDepth uint64
blzShallowWidth uint64
- compression *compression.Compressor
+ compression *compression.Config
blzOpts []blobovnicza.Option
- reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors.
+ reportError func(string, error) // reportError is the function called when encountering disk errors.
metrics Metrics
waitBeforeDropDB time.Duration
blzInitWorkerCount int
@@ -48,14 +47,14 @@ const (
func initConfig(c *cfg) {
*c = cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
perm: defaultPerm,
openedCacheSize: defaultOpenedCacheSize,
openedCacheTTL: defaultOpenedCacheTTL,
openedCacheExpInterval: defaultOpenedCacheInterval,
blzShallowDepth: defaultBlzShallowDepth,
blzShallowWidth: defaultBlzShallowWidth,
- reportError: func(context.Context, string, error) {},
+ reportError: func(string, error) {},
metrics: &noopMetrics{},
waitBeforeDropDB: defaultWaitBeforeDropDB,
blzInitWorkerCount: defaultBlzInitWorkerCount,
@@ -63,15 +62,10 @@ func initConfig(c *cfg) {
}
}
-func WithBlobovniczaTreeLogger(log *logger.Logger) Option {
+func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = log
- }
-}
-
-func WithBlobovniczaLogger(log *logger.Logger) Option {
- return func(c *cfg) {
- c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log))
+ c.log = l
+ c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l))
}
}
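Note: the merged `WithLogger` is a functional option that fans one logger out to both the tree config and the nested blobovnicza options, replacing the two separate setters. A self-contained sketch of that shape with placeholder types:

```go
package main

import "fmt"

type cfg struct {
	log     string
	blzOpts []string
}

type Option func(*cfg)

// WithLogger sets the tree logger and forwards the same logger to the
// nested blobovnicza options in one step.
func WithLogger(l string) Option {
	return func(c *cfg) {
		c.log = l
		c.blzOpts = append(c.blzOpts, "blz-logger:"+l)
	}
}

func newCfg(opts ...Option) *cfg {
	c := &cfg{log: "nop"}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", *newCfg(WithLogger("test")))
}
```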
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 37c49d741..76c4953e4 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -76,34 +77,37 @@ type putIterator struct {
}
func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
- active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
+ active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
if err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
- i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
- zap.Error(err))
+ i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return false, nil
}
if active == nil {
- i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+ i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false, nil
}
- defer active.Close(ctx)
+ defer active.Close()
i.AllFull = false
_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
if err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
- i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
+ i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.SystemPath()),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if errors.Is(err, blobovnicza.ErrNoSpace) {
i.AllFull = true
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index a840275b8..202d38cd7 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -49,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
var res common.RebuildRes
- b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
- completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter)
+ b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild)
+ completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
res.ObjectsMoved += completedPreviosMoves
if err != nil {
- b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
+ b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
success = false
return res, err
}
- b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
+ b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
- b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
+ b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
if err != nil {
- b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
+ b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
return res, err
}
- b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
+ b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
if err != nil {
success = false
@@ -78,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
var completedDBCount uint32
for _, db := range dbs {
- b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
- movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter)
+ b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
+ movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
res.ObjectsMoved += movedObjects
if err != nil {
- b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
+ b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
return res, err
}
- b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
+ b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
res.FilesRemoved++
completedDBCount++
b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))
@@ -165,7 +165,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
continue
}
path := filepath.Join(lvlPath, e.Name())
- resettlementRequired, err := b.rebuildBySize(ctx, path, target)
+ resettlementRequired, err := b.rebuildBySize(path, target)
if err != nil {
return false, err
}
@@ -180,13 +180,13 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
return result, nil
}
-func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) {
- shDB := b.getBlobovnicza(ctx, path)
- blz, err := shDB.Open(ctx)
+func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) {
+ shDB := b.getBlobovnicza(path)
+ blz, err := shDB.Open()
if err != nil {
return false, err
}
- defer shDB.Close(ctx)
+ defer shDB.Close()
fp := blz.FillPercent()
// accepted fill percent defines as
// |----|+++++++++++++++++|+++++++++++++++++|---------------
@@ -195,9 +195,9 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil
return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
}
-func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) {
- shDB := b.getBlobovnicza(ctx, path)
- blz, err := shDB.Open(ctx)
+func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
+ shDB := b.getBlobovnicza(path)
+ blz, err := shDB.Open()
if err != nil {
return 0, err
}
@@ -206,13 +206,13 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
if shDBClosed {
return
}
- shDB.Close(ctx)
+ shDB.Close()
}()
- dropTempFile, err := b.addRebuildTempFile(ctx, path)
+ dropTempFile, err := b.addRebuildTempFile(path)
if err != nil {
return 0, err
}
- migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter)
+ migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter)
if err != nil {
return migratedObjects, err
}
@@ -224,21 +224,21 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
return migratedObjects, err
}
-func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
+func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
sysPath := filepath.Join(b.rootPath, path)
- sysPath += rebuildSuffix
+ sysPath = sysPath + rebuildSuffix
_, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
if err != nil {
return nil, err
}
return func() {
if err := os.Remove(sysPath); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
}, nil
}
-func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) {
+func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
var result atomic.Uint64
batch := make(map[oid.Address][]byte)
@@ -253,12 +253,7 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
})
for {
- release, err := limiter.ReadRequest(ctx)
- if err != nil {
- return result.Load(), err
- }
- _, err = blz.Iterate(ctx, prm)
- release()
+ _, err := blz.Iterate(ctx, prm)
if err != nil && !errors.Is(err, errBatchFull) {
return result.Load(), err
}
@@ -270,19 +265,13 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
eg, egCtx := errgroup.WithContext(ctx)
for addr, data := range batch {
- release, err := limiter.AcquireWorkSlot(egCtx)
- if err != nil {
+ if err := limiter.AcquireWorkSlot(egCtx); err != nil {
_ = eg.Wait()
return result.Load(), err
}
eg.Go(func() error {
- defer release()
- moveRelease, err := limiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- err = b.moveObject(egCtx, blz, blzPath, addr, data, meta)
- moveRelease()
+ defer limiter.ReleaseWorkSlot()
+ err := b.moveObject(egCtx, blz, blzPath, addr, data, meta)
if err == nil {
result.Add(1)
}
@@ -328,7 +317,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo
return nil
}
-func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
+func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
@@ -341,7 +330,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB)
b.dbFilesGuard.Lock()
defer b.dbFilesGuard.Unlock()
- if err := shDB.CloseAndRemoveFile(ctx); err != nil {
+ if err := shDb.CloseAndRemoveFile(); err != nil {
return false, err
}
b.commondbManager.CleanResources(path)
@@ -370,37 +359,26 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
return b.dropDirectoryIfEmpty(filepath.Dir(path))
}
-func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) {
+func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) {
var count uint64
var rebuildTempFilesToRemove []string
err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
rebuildTmpFilePath := s
s = strings.TrimSuffix(s, rebuildSuffix)
- shDB := b.getBlobovnicza(ctx, s)
- blz, err := shDB.Open(ctx)
+ shDB := b.getBlobovnicza(s)
+ blz, err := shDB.Open()
if err != nil {
return true, err
}
- defer shDB.Close(ctx)
+ defer shDB.Close()
- release, err := rateLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
incompletedMoves, err := blz.ListMoveInfo(ctx)
- release()
if err != nil {
return true, err
}
for _, move := range incompletedMoves {
- release, err := rateLimiter.WriteRequest(ctx)
- if err != nil {
- return false, err
- }
- err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore)
- release()
- if err != nil {
+ if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil {
return true, err
}
count++
@@ -410,14 +388,9 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
return false, nil
})
for _, tmp := range rebuildTempFilesToRemove {
- release, err := rateLimiter.WriteRequest(ctx)
- if err != nil {
- return count, err
- }
if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
- release()
}
return count, err
}
@@ -425,12 +398,12 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
move blobovnicza.MoveInfo, metaStore common.MetaStorage,
) error {
- targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path())
- target, err := targetDB.Open(ctx)
+ targetDB := b.getBlobovnicza(NewIDFromBytes(move.TargetStorageID).Path())
+ target, err := targetDB.Open()
if err != nil {
return err
}
- defer targetDB.Close(ctx)
+ defer targetDB.Close()
existsInSource := true
var gPrm blobovnicza.GetPrm
@@ -440,14 +413,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
if client.IsErrObjectNotFound(err) {
existsInSource = false
} else {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}
}
if !existsInSource { // object was deleted by Rebuild, need to delete move info
if err = source.DropMoveInfo(ctx, move.Address); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}
b.deleteProtectedObjects.Delete(move.Address)
@@ -456,7 +429,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
existsInTarget, err := target.Exists(ctx, move.Address)
if err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}
@@ -466,25 +439,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
putPrm.SetMarshaledObject(gRes.Object())
_, err = target.Put(ctx, putPrm)
if err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
return err
}
}
if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
+ b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
return err
}
var deletePrm blobovnicza.DeletePrm
deletePrm.SetAddress(move.Address)
if _, err = source.Delete(ctx, deletePrm); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
return err
}
if err = source.DropMoveInfo(ctx, move.Address); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+ b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}
@@ -504,21 +477,21 @@ type moveIterator struct {
}
func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) {
- target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
+ target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
if err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
+ i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
}
return false, nil
}
if target == nil {
- i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+ i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
return false, nil
}
- defer target.Close(ctx)
+ defer target.Close()
i.AllFull = false
@@ -530,9 +503,9 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
TargetStorageID: targetStorageID.Bytes(),
}); err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
} else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
+ i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
@@ -546,15 +519,15 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
_, err = target.Blobovnicza().Put(ctx, putPrm)
if err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
+ i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
}
return true, nil
}
if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
+ i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
return true, nil
}
@@ -562,18 +535,18 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
deletePrm.SetAddress(i.Address)
if _, err = i.Source.Delete(ctx, deletePrm); err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err)
} else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
+ i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err)
} else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
+ i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index 4146ef260..b177d20fc 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -35,8 +35,8 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@@ -53,7 +53,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
@@ -65,8 +65,8 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@@ -83,19 +83,19 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
testRebuildFailoverValidate(t, dir, obj, true)
}
@@ -105,8 +105,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@@ -117,14 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
var pPrm blobovnicza.PutPrm
pPrm.SetAddress(object.AddressOf(obj))
@@ -132,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
testRebuildFailoverValidate(t, dir, obj, false)
}
@@ -140,8 +140,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) {
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(2048),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -162,22 +161,20 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
storageIDs: make(map[oid.Address][]byte),
guard: &sync.Mutex{},
}
- limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 1,
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ FillPercent: 1,
})
require.NoError(t, err)
require.Equal(t, uint64(1), rRes.ObjectsMoved)
require.Equal(t, uint64(0), rRes.FilesRemoved)
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
+ require.NoError(t, b.Close())
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
moveInfo, err := blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@@ -188,11 +185,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
_, err = blz.Get(context.Background(), gPrm)
require.True(t, client.IsErrObjectNotFound(err))
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
moveInfo, err = blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@@ -206,7 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)]))
}
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
_, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
require.True(t, os.IsNotExist(err))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index a7a99fec3..dfd928aaf 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -2,9 +2,7 @@ package blobovniczatree
import (
"context"
- "fmt"
"sync"
- "sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -50,8 +48,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -64,7 +61,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Init())
storageIDs := make(map[oid.Address][]byte)
- for range 100 {
+ for i := 0; i < 100; i++ {
obj := blobstortest.NewObject(64 * 1024) // 64KB object
data, err := obj.Marshal()
require.NoError(t, err)
@@ -79,11 +76,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
- limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 60,
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ FillPercent: 60,
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -97,8 +93,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
+ require.NoError(t, b.Close())
})
t.Run("no rebuild single db", func(t *testing.T) {
@@ -107,8 +102,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -134,11 +128,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
- limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 90, // 64KB / 100KB = 64%
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ FillPercent: 90, // 64KB / 100KB = 64%
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -152,8 +145,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
+ require.NoError(t, b.Close())
})
t.Run("rebuild by fill percent", func(t *testing.T) {
@@ -162,8 +154,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -177,7 +168,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs := make(map[oid.Address][]byte)
toDelete := make(map[oid.Address][]byte)
- for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
obj := blobstortest.NewObject(64 * 1024)
data, err := obj.Marshal()
require.NoError(t, err)
@@ -202,11 +193,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
- limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 80,
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -224,8 +214,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
+ require.NoError(t, b.Close())
})
t.Run("rebuild by overflow", func(t *testing.T) {
@@ -234,8 +223,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1), // single directory
WithBlobovniczaShallowDepth(1),
@@ -248,7 +236,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Init())
storageIDs := make(map[oid.Address][]byte)
- for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
obj := blobstortest.NewObject(64 * 1024)
data, err := obj.Marshal()
require.NoError(t, err)
@@ -263,11 +251,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
- require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, b.Close())
b = NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024),
WithBlobovniczaShallowWidth(1),
WithBlobovniczaShallowDepth(1),
@@ -279,11 +266,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
- limiter := &rebuildLimiterStub{}
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 80,
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -298,8 +284,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
+ require.NoError(t, b.Close())
})
}
@@ -309,8 +294,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(64*1024), // 64KB object size limit
WithBlobovniczaShallowWidth(5),
WithBlobovniczaShallowDepth(2), // depth = 2
@@ -334,12 +318,11 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
storageIDs := make(map[oid.Address][]byte)
storageIDs[prm.Address] = res.StorageID
- require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, b.Close())
b = NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(32*1024), // 32KB object size limit
WithBlobovniczaShallowWidth(5),
WithBlobovniczaShallowDepth(3), // depth = 3
@@ -355,10 +338,9 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
- limiter := &rebuildLimiterStub{}
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
- rPrm.Limiter = limiter
+ rPrm.WorkerLimiter = &rebuildLimiterStub{}
rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
@@ -373,16 +355,14 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
require.NoError(t, err)
}
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
+ require.NoError(t, b.Close())
}
func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
dir := t.TempDir()
b := NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(2048),
WithBlobovniczaShallowWidth(sourceWidth),
WithBlobovniczaShallowDepth(sourceDepth),
@@ -419,12 +399,11 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
}
require.NoError(t, eg.Wait())
- require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, b.Close())
b = NewBlobovniczaTree(
context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t)),
WithObjectSizeLimit(2048),
WithBlobovniczaShallowWidth(targetWidth),
WithBlobovniczaShallowDepth(targetDepth),
@@ -448,10 +427,9 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
- limiter := &rebuildLimiterStub{}
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
- rPrm.Limiter = limiter
+ rPrm.WorkerLimiter = &rebuildLimiterStub{}
rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
@@ -466,8 +444,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
require.NoError(t, err)
}
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
+ require.NoError(t, b.Close())
}
type storageIDUpdateStub struct {
@@ -485,36 +462,7 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr
return nil
}
-type rebuildLimiterStub struct {
- slots atomic.Int64
- readRequests atomic.Int64
- writeRequests atomic.Int64
-}
+type rebuildLimiterStub struct{}
-func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) {
- s.slots.Add(1)
- return func() { s.slots.Add(-1) }, nil
-}
-
-func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) {
- s.readRequests.Add(1)
- return func() { s.readRequests.Add(-1) }, nil
-}
-
-func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) {
- s.writeRequests.Add(1)
- return func() { s.writeRequests.Add(-1) }, nil
-}
-
-func (s *rebuildLimiterStub) ValidateReleased() error {
- if v := s.slots.Load(); v != 0 {
- return fmt.Errorf("invalid slots value %d", v)
- }
- if v := s.readRequests.Load(); v != 0 {
- return fmt.Errorf("invalid read requests value %d", v)
- }
- if v := s.writeRequests.Load(); v != 0 {
- return fmt.Errorf("invalid write requests value %d", v)
- }
- return nil
-}
+func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) error { return nil }
+func (s *rebuildLimiterStub) ReleaseWorkSlot() {}
diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go
index ceaf2538a..6f579a8ca 100644
--- a/pkg/local_object_storage/blobstor/blobstor.go
+++ b/pkg/local_object_storage/blobstor/blobstor.go
@@ -1,7 +1,6 @@
package blobstor
import (
- "context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -41,14 +40,14 @@ type SubStorageInfo struct {
type Option func(*cfg)
type cfg struct {
- compression compression.Compressor
+ compression compression.Config
log *logger.Logger
storage []SubStorage
metrics Metrics
}
func initConfig(c *cfg) {
- c.log = logger.NewLoggerWrapper(zap.L())
+ c.log = &logger.Logger{Logger: zap.L()}
c.metrics = &noopMetrics{}
}
@@ -91,19 +90,56 @@ func WithStorages(st []SubStorage) Option {
// WithLogger returns option to specify BlobStor's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
}
}
-func WithCompression(comp compression.Config) Option {
+// WithCompressObjects returns option to toggle
+// compression of the stored objects.
+//
+// If true, Zstandard algorithm is used for data compression.
+//
+// If compressor (decompressor) creation fails,
+// the uncompressed option will be used, and the error
+// is recorded in the provided log.
+func WithCompressObjects(comp bool) Option {
return func(c *cfg) {
- c.compression.Config = comp
+ c.compression.Enabled = comp
+ }
+}
+
+// WithCompressibilityEstimate returns an option to use
+// a normalized compressibility estimate to decide whether to
+// compress data or not.
+//
+// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
+func WithCompressibilityEstimate(v bool) Option {
+ return func(c *cfg) {
+ c.compression.UseCompressEstimation = v
+ }
+}
+
+// WithCompressibilityEstimateThreshold returns an option to set
+// the normalized compressibility estimate threshold.
+//
+// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
+func WithCompressibilityEstimateThreshold(threshold float64) Option {
+ return func(c *cfg) {
+ c.compression.CompressEstimationThreshold = threshold
+ }
+}
+
+// WithUncompressableContentTypes returns option to disable compression
+// for specific content types as seen by object.AttributeContentType attribute.
+func WithUncompressableContentTypes(values []string) Option {
+ return func(c *cfg) {
+ c.compression.UncompressableContentTypes = values
}
}
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
-func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) {
+func (b *BlobStor) SetReportErrorFunc(f func(string, error)) {
for i := range b.storage {
b.storage[i].Storage.SetReportErrorFunc(f)
}
@@ -115,6 +151,6 @@ func WithMetrics(m Metrics) Option {
}
}
-func (b *BlobStor) Compressor() *compression.Compressor {
- return &b.compression
+func (b *BlobStor) Compressor() *compression.Config {
+ return &b.cfg.compression
}
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index 6ddeb6f00..bed5e0eb9 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -52,12 +51,10 @@ func TestCompression(t *testing.T) {
newBlobStor := func(t *testing.T, compress bool) *BlobStor {
bs := New(
- WithCompression(compression.Config{
- Enabled: compress,
- }),
+ WithCompressObjects(compress),
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init(context.Background()))
+ require.NoError(t, bs.Init())
return bs
}
@@ -94,20 +91,20 @@ func TestCompression(t *testing.T) {
blobStor := newBlobStor(t, false)
testPut(t, blobStor, 0)
testGet(t, blobStor, 0)
- require.NoError(t, blobStor.Close(context.Background()))
+ require.NoError(t, blobStor.Close())
blobStor = newBlobStor(t, true)
testGet(t, blobStor, 0) // get uncompressed object with compress enabled
testPut(t, blobStor, 1)
testGet(t, blobStor, 1)
- require.NoError(t, blobStor.Close(context.Background()))
+ require.NoError(t, blobStor.Close())
blobStor = newBlobStor(t, false)
testGet(t, blobStor, 0) // get old uncompressed object
testGet(t, blobStor, 1) // get compressed object with compression disabled
testPut(t, blobStor, 2)
testGet(t, blobStor, 2)
- require.NoError(t, blobStor.Close(context.Background()))
+ require.NoError(t, blobStor.Close())
}
func TestBlobstor_needsCompression(t *testing.T) {
@@ -116,10 +113,8 @@ func TestBlobstor_needsCompression(t *testing.T) {
dir := t.TempDir()
bs := New(
- WithCompression(compression.Config{
- Enabled: compress,
- UncompressableContentTypes: ct,
- }),
+ WithCompressObjects(compress),
+ WithUncompressableContentTypes(ct),
WithStorages([]SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
@@ -135,7 +130,7 @@ func TestBlobstor_needsCompression(t *testing.T) {
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init(context.Background()))
+ require.NoError(t, bs.Init())
return bs
}
@@ -197,7 +192,7 @@ func TestConcurrentPut(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, blobStor.Init(context.Background()))
+ require.NoError(t, blobStor.Init())
testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)})
@@ -277,7 +272,7 @@ func TestConcurrentDelete(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, blobStor.Init(context.Background()))
+ require.NoError(t, blobStor.Init())
testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
var prm common.PutPrm
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 788fe66f2..19e181ee7 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -12,27 +12,16 @@ type RebuildRes struct {
}
type RebuildPrm struct {
- MetaStorage MetaStorage
- Limiter RebuildLimiter
- FillPercent int
+ MetaStorage MetaStorage
+ WorkerLimiter ConcurrentWorkersLimiter
+ FillPercent int
}
type MetaStorage interface {
UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
}
-type ReleaseFunc func()
-
-type ConcurrencyLimiter interface {
- AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error)
-}
-
-type RateLimiter interface {
- ReadRequest(context.Context) (ReleaseFunc, error)
- WriteRequest(context.Context) (ReleaseFunc, error)
-}
-
-type RebuildLimiter interface {
- ConcurrencyLimiter
- RateLimiter
+type ConcurrentWorkersLimiter interface {
+ AcquireWorkSlot(ctx context.Context) error
+ ReleaseWorkSlot()
}
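The ConcurrentWorkersLimiter contract restored here pairs an acquire that may fail with an unconditional release. One way to satisfy it is a buffered-channel semaphore; this is a sketch only, and the type and constructor names are hypothetical, not part of this patch:

    package main

    import "context"

    // semLimiter is a hypothetical buffered-channel semaphore
    // satisfying common.ConcurrentWorkersLimiter.
    type semLimiter struct {
        slots chan struct{}
    }

    func newSemLimiter(n int) *semLimiter {
        return &semLimiter{slots: make(chan struct{}, n)}
    }

    // AcquireWorkSlot blocks until a slot frees up or ctx is done.
    func (l *semLimiter) AcquireWorkSlot(ctx context.Context) error {
        select {
        case l.slots <- struct{}{}:
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    // ReleaseWorkSlot returns a previously acquired slot.
    func (l *semLimiter) ReleaseWorkSlot() {
        <-l.slots
    }

Callers are expected to pair each successful AcquireWorkSlot with exactly one ReleaseWorkSlot, which is all the simplified rebuildLimiterStub above now checks for.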
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index e35c35e60..4f3a20993 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -12,18 +12,18 @@ import (
type Storage interface {
Open(mode mode.ComponentMode) error
Init() error
- Close(context.Context) error
+ Close() error
Type() string
Path() string
ObjectsCount(ctx context.Context) (uint64, error)
- SetCompressor(cc *compression.Compressor)
- Compressor() *compression.Compressor
+ SetCompressor(cc *compression.Config)
+ Compressor() *compression.Config
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
- SetReportErrorFunc(f func(context.Context, string, error))
+ SetReportErrorFunc(f func(string, error))
SetParentID(parentID string)
Get(context.Context, GetPrm) (GetRes, error)
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 445a0494b..9f70f8ec2 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -11,7 +11,7 @@ import (
)
func BenchmarkCompression(b *testing.B) {
- c := Compressor{Config: Config{Enabled: true}}
+ c := Config{Enabled: true}
require.NoError(b, c.Init())
for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} {
@@ -33,7 +33,7 @@ func BenchmarkCompression(b *testing.B) {
}
}
-func benchWith(b *testing.B, c Compressor, data []byte) {
+func benchWith(b *testing.B, c Config, data []byte) {
b.ResetTimer()
b.ReportAllocs()
for range b.N {
@@ -56,10 +56,8 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
b.Run("estimate", func(b *testing.B) {
b.ResetTimer()
- c := &Compressor{
- Config: Config{
- Enabled: true,
- },
+ c := &Config{
+ Enabled: true,
}
require.NoError(b, c.Init())
@@ -78,10 +76,8 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
b.Run("compress", func(b *testing.B) {
b.ResetTimer()
- c := &Compressor{
- Config: Config{
- Enabled: true,
- },
+ c := &Config{
+ Enabled: true,
}
require.NoError(b, c.Init())
diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go
index c76cec9a1..85ab47692 100644
--- a/pkg/local_object_storage/blobstor/compression/compress.go
+++ b/pkg/local_object_storage/blobstor/compression/compress.go
@@ -4,36 +4,21 @@ import (
"bytes"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/klauspost/compress"
"github.com/klauspost/compress/zstd"
)
-type Level string
-
-const (
- LevelDefault Level = ""
- LevelOptimal Level = "optimal"
- LevelFastest Level = "fastest"
- LevelSmallestSize Level = "smallest_size"
-)
-
-type Compressor struct {
- Config
-
- encoder *zstd.Encoder
- decoder *zstd.Decoder
-}
-
// Config represents common compression-related configuration.
type Config struct {
Enabled bool
UncompressableContentTypes []string
- Level Level
- EstimateCompressibility bool
- EstimateCompressibilityThreshold float64
+ UseCompressEstimation bool
+ CompressEstimationThreshold float64
+
+ encoder *zstd.Encoder
+ decoder *zstd.Decoder
}
// zstdFrameMagic contains first 4 bytes of any compressed object
@@ -41,11 +26,11 @@ type Config struct {
var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
// Init initializes compression routines.
-func (c *Compressor) Init() error {
+func (c *Config) Init() error {
var err error
if c.Enabled {
- c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel()))
+ c.encoder, err = zstd.NewWriter(nil)
if err != nil {
return err
}
@@ -88,7 +73,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool {
// Decompress decompresses data if it starts with the magic
// and returns data untouched otherwise.
-func (c *Compressor) Decompress(data []byte) ([]byte, error) {
+func (c *Config) Decompress(data []byte) ([]byte, error) {
if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) {
return data, nil
}
@@ -97,13 +82,13 @@ func (c *Compressor) Decompress(data []byte) ([]byte, error) {
// Compress compresses data if compression is enabled
// and returns data untouched otherwise.
-func (c *Compressor) Compress(data []byte) []byte {
+func (c *Config) Compress(data []byte) []byte {
if c == nil || !c.Enabled {
return data
}
- if c.EstimateCompressibility {
+ if c.UseCompressEstimation {
estimated := compress.Estimate(data)
- if estimated >= c.EstimateCompressibilityThreshold {
+ if estimated >= c.CompressEstimationThreshold {
return c.compress(data)
}
return data
@@ -111,7 +96,7 @@ func (c *Compressor) Compress(data []byte) []byte {
return c.compress(data)
}
-func (c *Compressor) compress(data []byte) []byte {
+func (c *Config) compress(data []byte) []byte {
maxSize := c.encoder.MaxEncodedSize(len(data))
compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
if len(data) < len(compressed) {
@@ -121,7 +106,7 @@ func (c *Compressor) compress(data []byte) []byte {
}
// Close closes encoder and decoder, returns any error occurred.
-func (c *Compressor) Close() error {
+func (c *Config) Close() error {
var err error
if c.encoder != nil {
err = c.encoder.Close()
@@ -131,24 +116,3 @@ func (c *Compressor) Close() error {
}
return err
}
-
-func (c *Config) HasValidCompressionLevel() bool {
- return c.Level == LevelDefault ||
- c.Level == LevelOptimal ||
- c.Level == LevelFastest ||
- c.Level == LevelSmallestSize
-}
-
-func (c *Compressor) compressionLevel() zstd.EncoderLevel {
- switch c.Level {
- case LevelDefault, LevelOptimal:
- return zstd.SpeedDefault
- case LevelFastest:
- return zstd.SpeedFastest
- case LevelSmallestSize:
- return zstd.SpeedBestCompression
- default:
- assert.Fail("unknown compression level", string(c.Level))
- return zstd.SpeedDefault
- }
-}
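After this hunk, Config again owns the zstd encoder and decoder directly, so Init, Compress, Decompress, and Close all hang off the same value. A round-trip sketch under that assumption:

    package main

    import (
        "fmt"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
    )

    func main() {
        c := &compression.Config{Enabled: true}
        if err := c.Init(); err != nil { // allocates the zstd encoder/decoder
            panic(err)
        }
        defer c.Close()

        compressed := c.Compress([]byte("payload"))
        // Decompress is a no-op for data without the zstd frame magic,
        // so uncompressed objects written earlier still read back fine.
        plain, err := c.Decompress(compressed)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(plain))
    }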
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 0418eedd0..9b414a9be 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -6,14 +6,13 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"go.uber.org/zap"
)
// Open opens BlobStor.
func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
- b.log.Debug(ctx, logs.BlobstorOpening)
+ b.log.Debug(logs.BlobstorOpening)
b.modeMtx.Lock()
defer b.modeMtx.Unlock()
@@ -51,13 +50,9 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
// If BlobStor is already initialized, no action is taken.
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.
-func (b *BlobStor) Init(ctx context.Context) error {
- b.log.Debug(ctx, logs.BlobstorInitializing)
+func (b *BlobStor) Init() error {
+ b.log.Debug(logs.BlobstorInitializing)
- if !b.compression.HasValidCompressionLevel() {
- b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level)))
- b.compression.Level = compression.LevelDefault
- }
if err := b.compression.Init(); err != nil {
return err
}
@@ -72,14 +67,14 @@ func (b *BlobStor) Init(ctx context.Context) error {
}
// Close releases all internal resources of BlobStor.
-func (b *BlobStor) Close(ctx context.Context) error {
- b.log.Debug(ctx, logs.BlobstorClosing)
+func (b *BlobStor) Close() error {
+ b.log.Debug(logs.BlobstorClosing)
var firstErr error
for i := range b.storage {
- err := b.storage[i].Storage.Close(ctx)
+ err := b.storage[i].Storage.Close()
if err != nil {
- b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
+ b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}
diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go
index 86d8f15e3..c91508e6d 100644
--- a/pkg/local_object_storage/blobstor/delete.go
+++ b/pkg/local_object_storage/blobstor/delete.go
@@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
if err == nil || !client.IsErrObjectNotFound(err) {
if err == nil {
success = true
- logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
+ logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
}
return res, err
}
@@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
res, err := st.Delete(ctx, prm)
if err == nil {
success = true
- logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
+ logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}
return res, err
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index c155e15b8..43feec7c9 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -72,9 +73,10 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
}
for _, err := range errors[:len(errors)-1] {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
+ b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return common.ExistsRes{}, errors[len(errors)-1]
diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go
index 7eb7d49bf..783c198b2 100644
--- a/pkg/local_object_storage/blobstor/exists_test.go
+++ b/pkg/local_object_storage/blobstor/exists_test.go
@@ -22,7 +22,7 @@ func TestExists(t *testing.T) {
b := New(WithStorages(storages))
require.NoError(t, b.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, b.Init(context.Background()))
+ require.NoError(t, b.Init())
objects := []*objectSDK.Object{
testObject(smallSizeLimit / 2),
diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go
index 2544729f7..c21d79f09 100644
--- a/pkg/local_object_storage/blobstor/fstree/control.go
+++ b/pkg/local_object_storage/blobstor/fstree/control.go
@@ -1,8 +1,6 @@
package fstree
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)
@@ -30,7 +28,7 @@ func (t *FSTree) Init() error {
}
// Close implements common.Storage.
-func (t *FSTree) Close(_ context.Context) error {
+func (t *FSTree) Close() error {
t.metrics.Close()
return nil
}
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
index 3caee7ee1..b5dbc9e40 100644
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -2,8 +2,6 @@ package fstree
import (
"sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
// FileCounter used to count files in FSTree. The implementation must be thread-safe.
@@ -54,11 +52,16 @@ func (c *SimpleCounter) Dec(size uint64) {
c.mtx.Lock()
defer c.mtx.Unlock()
- assert.True(c.count > 0, "fstree.SimpleCounter: invalid count")
- c.count--
-
- assert.True(c.size >= size, "fstree.SimpleCounter: invalid size")
- c.size -= size
+ if c.count > 0 {
+ c.count--
+ } else {
+ panic("fstree.SimpleCounter: invalid count")
+ }
+ if c.size >= size {
+ c.size -= size
+ } else {
+ panic("fstree.SimpleCounter: invalid size")
+ }
}
func (c *SimpleCounter) CountSize() (uint64, uint64) {
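The replacement code keeps the counter's invariants but trades assertions for explicit panics. A usage sketch, assuming SimpleCounter's zero value is ready to use (its fields are only guarded by the embedded mutex) and that Inc mirrors Dec's signature:

    package main

    import (
        "fmt"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
    )

    func main() {
        c := &fstree.SimpleCounter{}
        c.Inc(1024) // one file of 1 KiB
        count, size := c.CountSize()
        fmt.Println(count, size) // 1 1024
        c.Dec(1024) // balanced; a second Dec(1024) would now panic
    }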
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 112741ab4..057796db2 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -45,7 +45,7 @@ type FSTree struct {
log *logger.Logger
- compressor *compression.Compressor
+ *compression.Config
Depth uint64
DirNameLen int
@@ -82,12 +82,12 @@ func New(opts ...Option) *FSTree {
Permissions: 0o700,
RootPath: "./",
},
- compressor: nil,
+ Config: nil,
Depth: 4,
DirNameLen: DirNameLen,
metrics: &noopMetrics{},
fileCounter: &noopCounter{},
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
}
for i := range opts {
opts[i](f)
@@ -152,8 +152,8 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
des, err := os.ReadDir(dirPath)
if err != nil {
if prm.IgnoreErrors {
- t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
- zap.Error(err),
+ t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.String("err", err.Error()),
zap.String("directory_path", dirPath))
return nil
}
@@ -196,13 +196,13 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
}
if err == nil {
- data, err = t.compressor.Decompress(data)
+ data, err = t.Decompress(data)
}
if err != nil {
if prm.IgnoreErrors {
- t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", addr),
- zap.Error(err),
+ zap.String("err", err.Error()),
zap.String("path", path))
continue
}
@@ -405,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err
return common.PutRes{}, err
}
if !prm.DontCompress {
- prm.RawData = t.compressor.Compress(prm.RawData)
+ prm.RawData = t.Compress(prm.RawData)
}
size = len(prm.RawData)
@@ -448,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err
}
}
- data, err = t.compressor.Decompress(data)
+ data, err = t.Decompress(data)
if err != nil {
return common.GetRes{}, err
}
@@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
},
)
if err != nil {
- return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
+ return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
}
return count, size, nil
@@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
},
)
if err != nil {
- return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
+ return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
}
success = true
return result, nil
@@ -597,16 +597,16 @@ func (t *FSTree) Path() string {
}
// SetCompressor implements common.Storage.
-func (t *FSTree) SetCompressor(cc *compression.Compressor) {
- t.compressor = cc
+func (t *FSTree) SetCompressor(cc *compression.Config) {
+ t.Config = cc
}
-func (t *FSTree) Compressor() *compression.Compressor {
- return t.compressor
+func (t *FSTree) Compressor() *compression.Config {
+ return t.Config
}
// SetReportErrorFunc implements common.Storage.
-func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) {
+func (t *FSTree) SetReportErrorFunc(_ func(string, error)) {
// Do nothing, FSTree can encounter only one error which is returned.
}
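The swap from a named compressor field to an embedded *compression.Config relies on Go's method promotion, which is why call sites above shrink from t.compressor.Decompress(data) to t.Decompress(data). A stripped-down illustration of the mechanism, independent of this codebase:

    package main

    type inner struct{}

    func (inner) Decompress(b []byte) ([]byte, error) { return b, nil }

    type outer struct {
        inner // embedded: outer gains Decompress without a wrapper
    }

    func main() {
        var o outer
        _, _ = o.Decompress(nil) // resolves to inner.Decompress via promotion
    }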
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index 50dae46a7..eb2126b6c 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -52,7 +52,7 @@ func TestObjectCounter(t *testing.T) {
require.Equal(t, uint64(0), size)
defer func() {
- require.NoError(t, fst.Close(context.Background()))
+ require.NoError(t, fst.Close())
}()
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 6d633dad6..4110ba7d7 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -67,9 +67,12 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
err := w.writeFile(tmpPath, data)
if err != nil {
var pe *fs.PathError
- if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) {
- err = common.ErrNoSpace
- _ = os.RemoveAll(tmpPath)
+ if errors.As(err, &pe) {
+ switch pe.Err {
+ case syscall.ENOSPC:
+ err = common.ErrNoSpace
+ _ = os.RemoveAll(tmpPath)
+ }
}
return err
}
@@ -133,6 +136,6 @@ func (w *genericWriter) removeWithCounter(p string, size uint64) error {
if err := os.Remove(p); err != nil {
return err
}
- w.fileCounter.Dec(size)
+ w.fileCounter.Dec(uint64(size))
return nil
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
index 49cbda344..3561c616b 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -69,13 +69,10 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if err != nil {
return err
}
- written := 0
tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10)
n, err := unix.Write(fd, data)
- for err == nil {
- written += n
-
- if written == len(data) {
+ if err == nil {
+ if n == len(data) {
err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
if err == nil {
w.fileCounter.Inc(uint64(len(data)))
@@ -83,23 +80,9 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if errors.Is(err, unix.EEXIST) {
err = nil
}
- break
+ } else {
+ err = errors.New("incomplete write")
}
-
- // From man 2 write:
- // https://www.man7.org/linux/man-pages/man2/write.2.html
- //
- // Note that a successful write() may transfer fewer than count
- // bytes. Such partial writes can occur for various reasons; for
- // example, because there was insufficient space on the disk device
- // to write all of the requested bytes, or because a blocked write()
- // to a socket, pipe, or similar was interrupted by a signal handler
- // after it had transferred some, but before it had transferred all
- // of the requested bytes. In the event of a partial write, the
- // caller can make another write() call to transfer the remaining
- // bytes. The subsequent call will either transfer further bytes or
- // may result in an error (e.g., if the disk is now full).
- n, err = unix.Write(fd, data[written:])
}
errClose := unix.Close(fd)
if err != nil {
@@ -131,7 +114,7 @@ func (w *linuxWriter) removeFile(p string, size uint64) error {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if err == nil {
- w.fileCounter.Dec(size)
+ w.fileCounter.Dec(uint64(size))
}
return err
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
deleted file mode 100644
index 7fae2e695..000000000
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//go:build linux && integration
-
-package fstree
-
-import (
- "context"
- "errors"
- "os"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "github.com/stretchr/testify/require"
- "golang.org/x/sys/unix"
-)
-
-func TestENOSPC(t *testing.T) {
- dir, err := os.MkdirTemp(t.TempDir(), "ramdisk")
- require.NoError(t, err)
-
- f, err := os.CreateTemp(t.TempDir(), "ramdisk_*")
- require.NoError(t, err)
-
- err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M")
- if errors.Is(err, unix.EPERM) {
- t.Skipf("skip size tests: no permission to mount: %v", err)
- return
- }
- require.NoError(t, err)
- defer func() {
- require.NoError(t, unix.Unmount(dir, 0))
- }()
-
- fst := New(WithPath(dir), WithDepth(1))
- require.NoError(t, fst.Open(mode.ComponentReadWrite))
- require.NoError(t, fst.Init())
-
- _, err = fst.Put(context.Background(), common.PutPrm{
- RawData: make([]byte, 10<<20),
- })
- require.ErrorIs(t, err, common.ErrNoSpace)
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go
index 6f2ac87e1..4d1f8fc22 100644
--- a/pkg/local_object_storage/blobstor/fstree/option.go
+++ b/pkg/local_object_storage/blobstor/fstree/option.go
@@ -4,6 +4,7 @@ import (
"io/fs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
type Option func(*FSTree)
@@ -52,6 +53,6 @@ func WithFileCounter(c FileCounter) Option {
func WithLogger(l *logger.Logger) Option {
return func(f *FSTree) {
- f.log = l
+ f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))}
}
}
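With the restored WithLogger, the caller's logger is wrapped rather than stored as-is, so every FSTree record carries a component field. A hedged construction sketch, reusing only options visible elsewhere in this patch (WithPath and WithDepth appear in the deleted ENOSPC test):

    package main

    import (
        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
        "go.uber.org/zap"
    )

    func main() {
        log := &logger.Logger{Logger: zap.NewExample()}
        fst := fstree.New(
            fstree.WithPath("/tmp/fstree"),
            fstree.WithDepth(1),
            fstree.WithLogger(log), // records now include component=FSTree
        )
        _ = fst
    }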
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
index b8e88f84a..21c80b089 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
@@ -19,7 +19,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
require.NoError(t, s.Init())
objects := prepare(t, 10, s, minSize, maxSize)
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close())
require.NoError(t, s.Open(mode.ComponentReadOnly))
for i := range objects {
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
index 3a163f6b1..cf4e76513 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
@@ -15,7 +15,7 @@ func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
objects := prepare(t, 4, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
index f34fe5f97..08465ed5e 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
@@ -14,7 +14,7 @@ func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
objects := prepare(t, 1, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
index af0f4b45d..d1f709b0c 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
@@ -15,7 +15,7 @@ func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
objects := prepare(t, 2, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
index 13032048c..fcbeddac7 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
@@ -17,7 +17,7 @@ func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
objects := prepare(t, 1, s, minSize, maxSize)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index d54c54f59..3a6c8b699 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -3,7 +3,6 @@ package blobstortest
import (
"context"
"errors"
- "slices"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -15,7 +14,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
objects := prepare(t, 10, s, minSize, maxSize)
@@ -27,7 +26,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
_, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)
- objects = slices.Delete(objects, delID, delID+1)
+ objects = append(objects[:delID], objects[delID+1:]...)
runTestNormalHandler(t, s, objects)
@@ -50,7 +49,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc)
_, err := s.Iterate(context.Background(), iterPrm)
require.NoError(t, err)
- require.Len(t, objects, len(seen))
+ require.Equal(t, len(objects), len(seen))
for i := range objects {
d, ok := seen[objects[i].addr.String()]
require.True(t, ok)
diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go
index ff1aa9d64..f213d7547 100644
--- a/pkg/local_object_storage/blobstor/iterate.go
+++ b/pkg/local_object_storage/blobstor/iterate.go
@@ -42,10 +42,10 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
_, err := b.storage[i].Storage.Iterate(ctx, prm)
if err != nil {
if prm.IgnoreErrors {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("storage_path", b.storage[i].Storage.Path()),
zap.String("storage_type", b.storage[i].Storage.Type()),
- zap.Error(err))
+ zap.String("err", err.Error()))
continue
}
return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index 2786321a8..079728380 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -3,14 +3,10 @@ package blobstor
import (
"context"
"encoding/binary"
- "errors"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -25,9 +21,7 @@ func TestIterateObjects(t *testing.T) {
// create BlobStor instance
blobStor := New(
WithStorages(defaultStorages(p, smalSz)),
- WithCompression(compression.Config{
- Enabled: true,
- }),
+ WithCompressObjects(true),
)
defer os.RemoveAll(p)
@@ -36,9 +30,9 @@ func TestIterateObjects(t *testing.T) {
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
// initialize Blobstor
- require.NoError(t, blobStor.Init(context.Background()))
+ require.NoError(t, blobStor.Init())
- defer blobStor.Close(context.Background())
+ defer blobStor.Close()
const objNum = 5
@@ -50,7 +44,7 @@ func TestIterateObjects(t *testing.T) {
mObjs := make(map[string]addrData)
- for i := range uint64(objNum) {
+ for i := uint64(0); i < objNum; i++ {
sz := smalSz
big := i < objNum/2
@@ -96,60 +90,117 @@ func TestIterateObjects(t *testing.T) {
}
func TestIterate_IgnoreErrors(t *testing.T) {
- ctx := context.Background()
-
- myErr := errors.New("unique error")
- nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil }
- panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") }
- errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr }
-
- var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error)
- st1 := teststore.New(
- teststore.WithSubstorage(memstore.New()),
- teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
- return s1iter(prm)
- }))
- st2 := teststore.New(
- teststore.WithSubstorage(memstore.New()),
- teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
- return s2iter(prm)
- }))
-
- bsOpts := []Option{WithStorages([]SubStorage{
- {Storage: st1},
- {Storage: st2},
- })}
- bs := New(bsOpts...)
- require.NoError(t, bs.Open(ctx, mode.ReadWrite))
- require.NoError(t, bs.Init(ctx))
-
- nopHandler := func(e common.IterationElement) error {
- return nil
- }
-
- t.Run("no errors", func(t *testing.T) {
- s1iter = nopIter
- s2iter = nopIter
- _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
- require.NoError(t, err)
- })
- t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) {
- s1iter = errIter
- s2iter = panicIter
- _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
- require.ErrorIs(t, err, myErr)
- })
-
- t.Run("ignore errors, storage 1", func(t *testing.T) {
- s1iter = errIter
- s2iter = nopIter
- _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
- require.NoError(t, err)
- })
- t.Run("ignore errors, storage 2", func(t *testing.T) {
- s1iter = nopIter
- s2iter = errIter
- _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
- require.NoError(t, err)
- })
+ t.Skip()
+ // dir := t.TempDir()
+ //
+ // const (
+ // smallSize = 512
+ // objCount = 5
+ // )
+ // bsOpts := []Option{
+ // WithCompressObjects(true),
+ // WithRootPath(dir),
+ // WithSmallSizeLimit(smallSize * 2), // + header
+ // WithBlobovniczaOpenedCacheSize(1),
+ // WithBlobovniczaShallowWidth(1),
+ // WithBlobovniczaShallowDepth(1)}
+ // bs := New(bsOpts...)
+ // require.NoError(t, bs.Open(false))
+ // require.NoError(t, bs.Init())
+ //
+ // addrs := make([]oid.Address, objCount)
+ // for i := range addrs {
+ // addrs[i] = oidtest.Address()
+ //
+ // obj := object.New()
+ // obj.SetContainerID(addrs[i].Container())
+ // obj.SetID(addrs[i].Object())
+ // obj.SetPayload(make([]byte, smallSize<<(i%2)))
+ //
+ // objData, err := obj.Marshal()
+ // require.NoError(t, err)
+ //
+ // _, err = bs.PutRaw(addrs[i], objData, true)
+ // require.NoError(t, err)
+ // }
+ //
+ // // Construct corrupted compressed object.
+ // buf := bytes.NewBuffer(nil)
+ // badObject := make([]byte, smallSize/2+1)
+ // enc, err := zstd.NewWriter(buf)
+ // require.NoError(t, err)
+ // rawData := enc.EncodeAll(badObject, nil)
+ // for i := 4; /* magic size */ i < len(rawData); i += 2 {
+ // rawData[i] ^= 0xFF
+ // }
+ // // Will be put uncompressed but fetched as compressed because of magic.
+ // _, err = bs.PutRaw(oidtest.Address(), rawData, false)
+ // require.NoError(t, err)
+ // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData))
+ //
+ // require.NoError(t, bs.Close())
+ //
+ // // Increase width to have blobovnicza which is definitely empty.
+ // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...)
+ // require.NoError(t, b.Open(false))
+ // require.NoError(t, b.Init())
+ //
+ // var p string
+ // for i := 0; i < 2; i++ {
+ // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10))
+ // if _, ok := bs.blobovniczas.opened.Get(bp); !ok {
+ // p = bp
+ // break
+ // }
+ // }
+ // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache")
+ // require.NoError(t, os.Chmod(p, 0))
+ //
+ // require.NoError(t, b.Close())
+ // require.NoError(t, bs.Open(false))
+ // require.NoError(t, bs.Init())
+ //
+ // var prm IteratePrm
+ // prm.SetIterationHandler(func(e IterationElement) error {
+ // return nil
+ // })
+ // _, err = bs.Iterate(prm)
+ // require.Error(t, err)
+ //
+ // prm.IgnoreErrors()
+ //
+ // t.Run("skip invalid objects", func(t *testing.T) {
+ // actual := make([]oid.Address, 0, len(addrs))
+ // prm.SetIterationHandler(func(e IterationElement) error {
+ // obj := object.New()
+ // err := obj.Unmarshal(e.data)
+ // if err != nil {
+ // return err
+ // }
+ //
+ // var addr oid.Address
+ // cnr, _ := obj.ContainerID()
+ // addr.SetContainer(cnr)
+ // id, _ := obj.ID()
+ // addr.SetObject(id)
+ // actual = append(actual, addr)
+ // return nil
+ // })
+ //
+ // _, err := bs.Iterate(prm)
+ // require.NoError(t, err)
+ // require.ElementsMatch(t, addrs, actual)
+ // })
+ // t.Run("return errors from handler", func(t *testing.T) {
+ // n := 0
+ // expectedErr := errors.New("expected error")
+ // prm.SetIterationHandler(func(e IterationElement) error {
+ // if n++; n == objCount/2 {
+ // return expectedErr
+ // }
+ // return nil
+ // })
+ // _, err := bs.Iterate(prm)
+ // require.ErrorIs(t, err, expectedErr)
+ // })
}
diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go
index 070b1eac9..7e057a0e3 100644
--- a/pkg/local_object_storage/blobstor/logger.go
+++ b/pkg/local_object_storage/blobstor/logger.go
@@ -1,8 +1,6 @@
package blobstor
import (
- "context"
-
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -13,8 +11,8 @@ const (
putOp = "PUT"
)
-func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
- storagelog.Write(ctx, l,
+func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
+ storagelog.Write(l,
storagelog.AddressField(addr),
storagelog.OpField(op),
storagelog.StorageTypeField(typ),
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go
index 3df96a1c3..449d4352a 100644
--- a/pkg/local_object_storage/blobstor/memstore/control.go
+++ b/pkg/local_object_storage/blobstor/memstore/control.go
@@ -1,8 +1,6 @@
package memstore
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
@@ -12,11 +10,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error {
return nil
}
-func (s *memstoreImpl) Init() error { return nil }
-func (s *memstoreImpl) Close(context.Context) error { return nil }
-func (s *memstoreImpl) Type() string { return Type }
-func (s *memstoreImpl) Path() string { return s.rootPath }
-func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc }
-func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression }
-func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {}
-func (s *memstoreImpl) SetParentID(string) {}
+func (s *memstoreImpl) Init() error { return nil }
+func (s *memstoreImpl) Close() error { return nil }
+func (s *memstoreImpl) Type() string { return Type }
+func (s *memstoreImpl) Path() string { return s.rootPath }
+func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
+func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
+func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f }
+func (s *memstoreImpl) SetParentID(string) {}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go
index 7ef7e37a4..0252c7983 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore.go
@@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
// Decompress the data.
var err error
if data, err = s.compression.Decompress(data); err != nil {
- return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// Unmarshal the SDK object.
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
@@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common
elem := common.IterationElement{
ObjectData: v,
}
- if err := elem.Address.DecodeString(k); err != nil {
+ if err := elem.Address.DecodeString(string(k)); err != nil {
if req.IgnoreErrors {
continue
}
- return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err))
+ return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err))
}
var err error
if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil {
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index f904d4232..8d1480dff 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
@@ -15,8 +16,9 @@ import (
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
+ WithLogger(test.NewLogger(t)),
)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go
index 7605af4e5..3d67b1e9c 100644
--- a/pkg/local_object_storage/blobstor/memstore/option.go
+++ b/pkg/local_object_storage/blobstor/memstore/option.go
@@ -2,20 +2,33 @@ package memstore
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
type cfg struct {
+ log *logger.Logger
rootPath string
readOnly bool
- compression *compression.Compressor
+ compression *compression.Config
+ reportError func(string, error)
}
func defaultConfig() *cfg {
- return &cfg{}
+ return &cfg{
+ log: &logger.Logger{Logger: zap.L()},
+ reportError: func(string, error) {},
+ }
}
type Option func(*cfg)
+func WithLogger(l *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = l
+ }
+}
+
func WithRootPath(p string) Option {
return func(c *cfg) {
c.rootPath = p
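The option.go hunk above restores a logger and a reportError callback with safe defaults in defaultConfig, so both may be omitted at the call site. A hedged construction sketch mirroring TestSimpleLifecycle earlier in this patch; log stands in for any available *logger.Logger.

// Sketch: omitted options fall back to defaultConfig, i.e. a zap.L()
// logger and a no-op reportError, so the store never dereferences nil.
s := memstore.New(
	memstore.WithRootPath("memstore"),
	memstore.WithLogger(log),
)
defer func() { _ = s.Close() }()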
diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go
index 80268fa7a..a579a6f92 100644
--- a/pkg/local_object_storage/blobstor/mode.go
+++ b/pkg/local_object_storage/blobstor/mode.go
@@ -8,7 +8,7 @@ import (
)
// SetMode sets the blobstor mode of operation.
-func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
+func (b *BlobStor) SetMode(m mode.Mode) error {
b.modeMtx.Lock()
defer b.modeMtx.Unlock()
@@ -20,14 +20,14 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
return nil
}
- err := b.Close(ctx)
+ err := b.Close()
if err == nil {
- if err = b.openBlobStor(ctx, m); err == nil {
- err = b.Init(ctx)
+ if err = b.openBlobStor(context.TODO(), m); err == nil {
+ err = b.Init()
}
}
if err != nil {
- return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
+ return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
}
b.mode = m
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 64e3c8da1..1ac769e36 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -106,7 +106,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close(context.Background())) }()
+ defer func() { require.NoError(b, st.Close()) }()
// Fill database
var errG errgroup.Group
@@ -161,7 +161,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
gen := genEntry.create()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close(context.Background())) }()
+ defer func() { require.NoError(b, st.Close()) }()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
@@ -200,7 +200,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close(context.Background())) }()
+ defer func() { require.NoError(b, st.Close()) }()
// Fill database
for range tt.size {
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
index fe9c109dd..1adae303d 100644
--- a/pkg/local_object_storage/blobstor/put.go
+++ b/pkg/local_object_storage/blobstor/put.go
@@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
// marshal object
data, err := prm.Object.Marshal()
if err != nil {
- return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
+ return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
}
prm.RawData = data
}
@@ -63,7 +63,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
success = true
- logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
+ logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}
return res, err
}
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index f28816555..7b2786ba2 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -13,19 +13,24 @@ type StorageIDUpdate interface {
UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
}
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error {
+type ConcurrentWorkersLimiter interface {
+ AcquireWorkSlot(ctx context.Context) error
+ ReleaseWorkSlot()
+}
+
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error {
var summary common.RebuildRes
var rErr error
for _, storage := range b.storage {
res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
- MetaStorage: upd,
- Limiter: concLimiter,
- FillPercent: fillPercent,
+ MetaStorage: upd,
+ WorkerLimiter: limiter,
+ FillPercent: fillPercent,
})
summary.FilesRemoved += res.FilesRemoved
summary.ObjectsMoved += res.ObjectsMoved
if err != nil {
- b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages,
+ b.log.Error(logs.BlobstorRebuildFailedToRebuildStorages,
zap.String("failed_storage_path", storage.Storage.Path()),
zap.String("failed_storage_type", storage.Storage.Type()),
zap.Error(err))
@@ -33,7 +38,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter
break
}
}
- b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted,
+ b.log.Info(logs.BlobstorRebuildRebuildStoragesCompleted,
zap.Bool("success", rErr == nil),
zap.Uint64("total_files_removed", summary.FilesRemoved),
zap.Uint64("total_objects_moved", summary.ObjectsMoved))
diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go
index 3a38ecf82..bc0bed49d 100644
--- a/pkg/local_object_storage/blobstor/teststore/option.go
+++ b/pkg/local_object_storage/blobstor/teststore/option.go
@@ -1,8 +1,6 @@
package teststore
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -17,9 +15,9 @@ type cfg struct {
Type func() string
Path func() string
- SetCompressor func(cc *compression.Compressor)
- Compressor func() *compression.Compressor
- SetReportErrorFunc func(f func(context.Context, string, error))
+ SetCompressor func(cc *compression.Config)
+ Compressor func() *compression.Config
+ SetReportErrorFunc func(f func(string, error))
Get func(common.GetPrm) (common.GetRes, error)
GetRange func(common.GetRangePrm) (common.GetRangeRes, error)
@@ -45,15 +43,15 @@ func WithClose(f func() error) Option { return func(c *cfg) { c
func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } }
func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } }
-func WithSetCompressor(f func(*compression.Compressor)) Option {
+func WithSetCompressor(f func(*compression.Config)) Option {
return func(c *cfg) { c.overrides.SetCompressor = f }
}
-func WithCompressor(f func() *compression.Compressor) Option {
+func WithCompressor(f func() *compression.Config) Option {
return func(c *cfg) { c.overrides.Compressor = f }
}
-func WithReportErrorFunc(f func(func(context.Context, string, error))) Option {
+func WithReportErrorFunc(f func(func(string, error))) Option {
return func(c *cfg) { c.overrides.SetReportErrorFunc = f }
}
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
index 190b6a876..fea4a2d49 100644
--- a/pkg/local_object_storage/blobstor/teststore/teststore.go
+++ b/pkg/local_object_storage/blobstor/teststore/teststore.go
@@ -77,14 +77,14 @@ func (s *TestStore) Init() error {
}
}
-func (s *TestStore) Close(ctx context.Context) error {
+func (s *TestStore) Close() error {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Close != nil:
return s.overrides.Close()
case s.st != nil:
- return s.st.Close(ctx)
+ return s.st.Close()
default:
panic("unexpected storage call: Close()")
}
@@ -116,7 +116,7 @@ func (s *TestStore) Path() string {
}
}
-func (s *TestStore) SetCompressor(cc *compression.Compressor) {
+func (s *TestStore) SetCompressor(cc *compression.Config) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
@@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Compressor) {
}
}
-func (s *TestStore) Compressor() *compression.Compressor {
+func (s *TestStore) Compressor() *compression.Config {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
@@ -142,7 +142,7 @@ func (s *TestStore) Compressor() *compression.Compressor {
}
}
-func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) {
+func (s *TestStore) SetReportErrorFunc(f func(string, error)) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index e0617a832..e45f502ac 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -44,25 +44,22 @@ func (r ListContainersRes) Containers() []cid.ID {
// ContainerSize returns the sum of estimation container sizes among all shards.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) {
- defer elapsed("ContainerSize", e.metrics.AddMethodDuration)()
-
+func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
err = e.execIfNotBlocked(func() error {
- var csErr error
- res, csErr = e.containerSize(ctx, prm)
- return csErr
+ res, err = e.containerSize(prm)
+ return err
})
return
}
// ContainerSize calls ContainerSize method on engine to calculate sum of estimation container sizes among all shards.
-func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) {
+func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
var prm ContainerSizePrm
prm.SetContainerID(id)
- res, err := e.ContainerSize(ctx, prm)
+ res, err := e.ContainerSize(prm)
if err != nil {
return 0, err
}
@@ -70,15 +67,18 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er
return res.Size(), nil
}
-func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
- var res ContainerSizeRes
- err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
+ if e.metrics != nil {
+ defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)()
+ }
+
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
var csPrm shard.ContainerSizePrm
csPrm.SetContainerID(prm.cnr)
- csRes, err := sh.ContainerSize(ctx, csPrm)
+ csRes, err := sh.Shard.ContainerSize(csPrm)
if err != nil {
- e.reportShardError(ctx, sh, "can't get container size", err,
+ e.reportShardError(sh, "can't get container size", err,
zap.Stringer("container_id", prm.cnr))
return false
}
@@ -88,19 +88,16 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
return false
})
- return res, err
+ return
}
// ListContainers returns a unique container IDs presented in the engine objects.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) (res ListContainersRes, err error) {
- defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
-
err = e.execIfNotBlocked(func() error {
- var lcErr error
- res, lcErr = e.listContainers(ctx)
- return lcErr
+ res, err = e.listContainers(ctx)
+ return err
})
return
@@ -119,12 +116,16 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
}
func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
+ if e.metrics != nil {
+ defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
+ }
+
uniqueIDs := make(map[string]cid.ID)
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
- res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
if err != nil {
- e.reportShardError(ctx, sh, "can't get list of containers", err)
+ e.reportShardError(sh, "can't get list of containers", err)
return false
}
@@ -136,9 +137,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes,
}
return false
- }); err != nil {
- return ListContainersRes{}, err
- }
+ })
result := make([]cid.ID, 0, len(uniqueIDs))
for _, v := range uniqueIDs {
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index bf1649f6e..4778cf539 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -22,6 +22,10 @@ type shardInitError struct {
// Open opens all StorageEngine's components.
func (e *StorageEngine) Open(ctx context.Context) error {
+ return e.open(ctx)
+}
+
+func (e *StorageEngine) open(ctx context.Context) error {
e.mtx.Lock()
defer e.mtx.Unlock()
@@ -45,16 +49,16 @@ func (e *StorageEngine) Open(ctx context.Context) error {
for res := range errCh {
if res.err != nil {
- e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping,
+ e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close(ctx)
+ err := sh.Close()
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
+ e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
@@ -73,7 +77,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
errCh := make(chan shardInitError, len(e.shards))
var eg errgroup.Group
- if e.lowMem && e.anyShardRequiresRefill() {
+ if e.cfg.lowMem && e.anyShardRequiresRefill() {
eg.SetLimit(1)
}
@@ -91,29 +95,29 @@ func (e *StorageEngine) Init(ctx context.Context) error {
err := eg.Wait()
close(errCh)
if err != nil {
- return fmt.Errorf("initialize shards: %w", err)
+ return fmt.Errorf("failed to initialize shards: %w", err)
}
for res := range errCh {
if res.err != nil {
if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
- e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping,
+ e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close(ctx)
+ err := sh.Close()
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
+ e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
continue
}
- return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
+ return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
}
}
@@ -122,7 +126,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
}
e.wg.Add(1)
- go e.setModeLoop(ctx)
+ go e.setModeLoop()
return nil
}
@@ -145,19 +149,25 @@ var errClosed = errors.New("storage engine is closed")
func (e *StorageEngine) Close(ctx context.Context) error {
close(e.closeCh)
defer e.wg.Wait()
- return e.closeEngine(ctx)
+ return e.setBlockExecErr(ctx, errClosed)
}
// closes all shards. Never returns an error, shard errors are logged.
-func (e *StorageEngine) closeAllShards(ctx context.Context) error {
+func (e *StorageEngine) close(releasePools bool) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
+ if releasePools {
+ for _, p := range e.shardPools {
+ p.Release()
+ }
+ }
+
for id, sh := range e.shards {
- if err := sh.Close(ctx); err != nil {
- e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
+ if err := sh.Close(); err != nil {
+ e.log.Debug(logs.EngineCouldNotCloseShard,
zap.String("id", id),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
}
@@ -172,29 +182,90 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error {
e.blockExec.mtx.RLock()
defer e.blockExec.mtx.RUnlock()
- if e.blockExec.closed {
- return errClosed
+ if e.blockExec.err != nil {
+ return e.blockExec.err
}
return op()
}
-func (e *StorageEngine) closeEngine(ctx context.Context) error {
+// sets the flag that blocks execution of all data operations according to err:
+// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method
+// (if err == errClosed => additionally releases pools and does not allow to resume executions).
+// - otherwise, resumes execution. If exec was blocked, calls open method.
+//
+// Can be called concurrently with exec. In this case it waits for all executions to complete.
+func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error {
e.blockExec.mtx.Lock()
defer e.blockExec.mtx.Unlock()
- if e.blockExec.closed {
+ prevErr := e.blockExec.err
+
+ wasClosed := errors.Is(prevErr, errClosed)
+ if wasClosed {
return errClosed
}
- e.blockExec.closed = true
- return e.closeAllShards(ctx)
+ e.blockExec.err = err
+
+ if err == nil {
+ if prevErr != nil { // block -> ok
+ return e.open(ctx)
+ }
+ } else if prevErr == nil { // ok -> block
+ return e.close(errors.Is(err, errClosed))
+ }
+
+ // otherwise do nothing
+
+ return nil
+}
+
+// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err.
+// To resume the execution, use ResumeExecution method.
+//
+// Can be called regardless of any previous blocking. If execution wasn't blocked, releases all resources
+// similar to Close. Can be called concurrently with Close and any data-related method (waits for all executions
+// to complete). Returns an error if Close has been called before.
+//
+// Must not be called concurrently with either Open or Init.
+//
+// Note: technically, passing a nil error will resume the execution, but it is recommended to call ResumeExecution
+// for this.
+func (e *StorageEngine) BlockExecution(err error) error {
+ return e.setBlockExecErr(context.Background(), err)
+}
+
+// ResumeExecution resumes the execution of any data-related operation.
+// To block the execution, use BlockExecution method.
+//
+// Can be called regardless of any previous blocking. If execution was blocked, prepares all resources
+// similar to Open. Can be called concurrently with Close and any data-related method (waits for all executions
+// to complete). Returns an error if Close has been called before.
+//
+// Must not be called concurrently with either Open or Init.
+func (e *StorageEngine) ResumeExecution() error {
+ return e.setBlockExecErr(context.Background(), nil)
}
type ReConfiguration struct {
+ errorsThreshold uint32
+ shardPoolSize uint32
+
shards map[string][]shard.Option // meta path -> shard opts
}
+// SetErrorsThreshold sets the number of errors after which
+// a shard is moved to read-only mode.
+func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
+ rCfg.errorsThreshold = errorsThreshold
+}
+
+// SetShardPoolSize sets the size of the worker pool for each shard.
+func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
+ rCfg.shardPoolSize = shardPoolSize
+}
+
// AddShard adds a shard for the reconfiguration.
// Shard identifier is calculated from paths used in blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
@@ -249,12 +320,12 @@ loop:
e.mtx.RUnlock()
- e.removeShards(ctx, shardsToRemove...)
+ e.removeShards(shardsToRemove...)
for _, p := range shardsToReload {
err := p.sh.Reload(ctx, p.opts...)
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotReloadAShard,
+ e.log.Error(logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),
zap.Error(err))
}
@@ -263,7 +334,7 @@ loop:
for _, newID := range shardsToAdd {
sh, err := e.createShard(ctx, rcfg.shards[newID])
if err != nil {
- return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
+ return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
}
idStr := sh.ID().String()
@@ -273,17 +344,17 @@ loop:
err = sh.Init(ctx)
}
if err != nil {
- _ = sh.Close(ctx)
- return fmt.Errorf("init %s shard: %w", idStr, err)
+ _ = sh.Close()
+ return fmt.Errorf("could not init %s shard: %w", idStr, err)
}
err = e.addShard(sh)
if err != nil {
- _ = sh.Close(ctx)
- return fmt.Errorf("add %s shard: %w", idStr, err)
+ _ = sh.Close()
+ return fmt.Errorf("could not add %s shard: %w", idStr, err)
}
- e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
+ e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
}
return nil
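Distilled from execIfNotBlocked and setBlockExecErr above: the whole blocking mechanism is one error value behind an RWMutex, with nil meaning operations may run. A standalone hedged sketch of that gate follows; the open/close side effects of the real setBlockExecErr are intentionally omitted, and all names are illustrative ("sync" is the only import needed).

// gate mirrors the blockExec struct above: err == nil lets operations
// run, any other value is returned to every caller until resumed.
type gate struct {
	mtx sync.RWMutex
	err error
}

func (g *gate) exec(op func() error) error {
	g.mtx.RLock()
	defer g.mtx.RUnlock()
	if g.err != nil {
		return g.err // blocked: fail fast with the stored error
	}
	return op()
}

// set corresponds to BlockExecution(err) / ResumeExecution() above.
func (g *gate) set(err error) {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	g.err = err
}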
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 4ff0ed5ec..2de92ae84 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -2,6 +2,7 @@ package engine
import (
"context"
+ "errors"
"fmt"
"io/fs"
"os"
@@ -11,14 +12,17 @@ import (
"testing"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@@ -159,6 +163,42 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
require.Equal(t, 1, shardCount)
}
+func TestExecBlocks(t *testing.T) {
+ e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
+
+ // put some object
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+
+ addr := object.AddressOf(obj)
+
+ require.NoError(t, Put(context.Background(), e, obj))
+
+ // block executions
+ errBlock := errors.New("block exec err")
+
+ require.NoError(t, e.BlockExecution(errBlock))
+
+ // try to exec some op
+ _, err := Head(context.Background(), e, addr)
+ require.ErrorIs(t, err, errBlock)
+
+ // resume executions
+ require.NoError(t, e.ResumeExecution())
+
+ _, err = Head(context.Background(), e, addr) // can be any data-related op
+ require.NoError(t, err)
+
+ // close
+ require.NoError(t, e.Close(context.Background()))
+
+ // try exec after close
+ _, err = Head(context.Background(), e, addr)
+ require.Error(t, err)
+
+ // try to resume
+ require.Error(t, e.ResumeExecution())
+}
+
func TestPersistentShardID(t *testing.T) {
dir := t.TempDir()
@@ -205,6 +245,7 @@ func TestReload(t *testing.T) {
// no new paths => no new shards
require.Equal(t, shardNum, len(e.shards))
+ require.Equal(t, shardNum, len(e.shardPools))
newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum))
@@ -216,6 +257,7 @@ func TestReload(t *testing.T) {
require.NoError(t, e.Reload(context.Background(), rcfg))
require.Equal(t, shardNum+1, len(e.shards))
+ require.Equal(t, shardNum+1, len(e.shardPools))
require.NoError(t, e.Close(context.Background()))
})
@@ -235,6 +277,7 @@ func TestReload(t *testing.T) {
// removed one
require.Equal(t, shardNum-1, len(e.shards))
+ require.Equal(t, shardNum-1, len(e.shardPools))
require.NoError(t, e.Close(context.Background()))
})
@@ -259,8 +302,7 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
meta.WithEpochState(epochState{}),
),
}
- }).
- prepare(t)
+ })
e, ids := te.engine, te.shardIDs
for _, id := range ids {
@@ -268,6 +310,10 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
}
require.Equal(t, num, len(e.shards))
+ require.Equal(t, num, len(e.shardPools))
+
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
return e, currShards
}
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 223cdbc48..318f938fb 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -23,6 +24,9 @@ type DeletePrm struct {
forceRemoval bool
}
+// DeleteRes groups the resulting values of Delete operation.
+type DeleteRes struct{}
+
// WithAddress is a Delete option to set the addresses of the objects to delete.
//
// Option is required.
@@ -47,21 +51,27 @@ func (p *DeletePrm) WithForceRemoval() {
// NOTE: Marks any object to be deleted (despite any prohibitions
// on operations with that object) if WithForceRemoval option has
// been provided.
-func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error {
+func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
attribute.Bool("force_removal", prm.forceRemoval),
))
defer span.End()
- defer elapsed("Delete", e.metrics.AddMethodDuration)()
- return e.execIfNotBlocked(func() error {
- return e.delete(ctx, prm)
+ err = e.execIfNotBlocked(func() error {
+ res, err = e.delete(ctx, prm)
+ return err
})
+
+ return
}
-func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
+func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Delete", e.metrics.AddMethodDuration)()
+ }
+
var locked struct {
is bool
}
@@ -71,7 +81,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
// Removal of a big object is done in multiple stages:
// 1. Remove the parent object. If it is locked or already removed, return immediately.
// 2. Otherwise, search for all objects with a particular SplitID and delete them too.
- if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
var existsPrm shard.ExistsPrm
existsPrm.Address = prm.addr
@@ -90,7 +100,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
return false
} else {
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
}
return false
}
@@ -106,7 +116,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
_, err = sh.Inhume(ctx, shPrm)
if err != nil {
- e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
var target *apistatus.ObjectLocked
locked.is = errors.As(err, &target)
@@ -116,40 +126,39 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
// If a parent object is removed we should set GC mark on each shard.
return splitInfo == nil
- }); err != nil {
- return err
- }
+ })
if locked.is {
- return new(apistatus.ObjectLocked)
+ return DeleteRes{}, new(apistatus.ObjectLocked)
}
if splitInfo != nil {
- return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
+ e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
}
- return nil
+ return DeleteRes{}, nil
}
-func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error {
+func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) {
var fs objectSDK.SearchFilters
fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
var selectPrm shard.SelectPrm
selectPrm.SetFilters(fs)
- selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID
+ selectPrm.SetContainerID(addr.Container())
var inhumePrm shard.InhumePrm
if force {
inhumePrm.ForceRemoval()
}
- return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(ctx, selectPrm)
if err != nil {
- e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
+ e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false
}
@@ -158,9 +167,10 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
- e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
+ e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
- zap.Error(err))
+ zap.String("err", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}
}
@@ -181,15 +191,16 @@ func (e *StorageEngine) deleteChunks(
var objID oid.ID
err := objID.ReadFromV2(chunk.ID)
if err != nil {
- e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
}
addr.SetObject(objID)
inhumePrm.MarkAsGarbage(addr)
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
- e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
+ e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
- zap.Error(err))
+ zap.String("err", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}
}
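To make stage 2 of the removal comment above concrete, here is a hedged sketch of selecting and inhuming the children of a split object on a single shard sh; res.AddressList is an assumed SelectRes accessor, and error handling is trimmed to log-and-continue as in the hunk.

// Sketch: stage 2 of big-object removal on one shard.
var fs objectSDK.SearchFilters
fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)

var selPrm shard.SelectPrm
selPrm.SetFilters(fs)
selPrm.SetContainerID(addr.Container()) // container scopes the search

res, err := sh.Select(ctx, selPrm)
if err != nil {
	return err
}

var inhPrm shard.InhumePrm
for _, child := range res.AddressList() { // assumed accessor name
	inhPrm.MarkAsGarbage(child)
	if _, err := sh.Inhume(ctx, inhPrm); err != nil {
		continue // mirror the hunk: log and move to the next child
	}
}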
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index a56598c09..4a6758012 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -48,13 +49,18 @@ func TestDeleteBigObject(t *testing.T) {
link.SetSplitID(splitID)
link.SetChildren(childIDs...)
- e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ s1 := testNewShard(t)
+ s2 := testNewShard(t)
+ s3 := testNewShard(t)
+
+ e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
+ e.log = test.NewLogger(t)
+ defer e.Close(context.Background())
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i], false))
+ require.NoError(t, Put(context.Background(), e, children[i]))
}
- require.NoError(t, Put(context.Background(), e, link, false))
+ require.NoError(t, Put(context.Background(), e, link))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
@@ -70,7 +76,8 @@ func TestDeleteBigObject(t *testing.T) {
deletePrm.WithForceRemoval()
deletePrm.WithAddress(addrParent)
- require.NoError(t, e.Delete(context.Background(), deletePrm))
+ _, err := e.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
@@ -112,18 +119,16 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
link.SetSplitID(splitID)
link.SetChildren(childIDs...)
- te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
- return []shard.Option{shard.WithDisabledGC()}
- }).prepare(t)
- e := te.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ s1 := testNewShard(t, shard.WithDisabledGC())
- s1 := te.shards[0]
+ e := testNewEngine(t).setInitializedShards(t, s1).engine
+ e.log = test.NewLogger(t)
+ defer e.Close(context.Background())
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i], false))
+ require.NoError(t, Put(context.Background(), e, children[i]))
}
- require.NoError(t, Put(context.Background(), e, link, false))
+ require.NoError(t, Put(context.Background(), e, link))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
@@ -140,7 +145,8 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
deletePrm.WithForceRemoval()
deletePrm.WithAddress(addrParent)
- require.NoError(t, e.Delete(context.Background(), deletePrm))
+ _, err := e.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
@@ -151,7 +157,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
// delete physical
var delPrm shard.DeletePrm
delPrm.SetAddresses(addrParent)
- _, err := s1.Delete(context.Background(), delPrm)
+ _, err = s1.Delete(context.Background(), delPrm)
require.NoError(t, err)
delPrm.SetAddresses(addrLink)
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 376d545d3..5e883a641 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -12,8 +12,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
@@ -28,13 +28,16 @@ type StorageEngine struct {
shards map[string]hashedShard
+ shardPools map[string]util.WorkerPool
+
closeCh chan struct{}
setModeCh chan setModeRequest
wg sync.WaitGroup
blockExec struct {
- mtx sync.RWMutex
- closed bool
+ mtx sync.RWMutex
+
+ err error
}
evacuateLimiter *evacuationLimiter
}
@@ -52,7 +55,7 @@ type setModeRequest struct {
// setModeLoop listens setModeCh to perform degraded mode transition of a single shard.
// Instead of creating a worker per single shard we use a single goroutine.
-func (e *StorageEngine) setModeLoop(ctx context.Context) {
+func (e *StorageEngine) setModeLoop() {
defer e.wg.Done()
var (
@@ -72,7 +75,7 @@ func (e *StorageEngine) setModeLoop(ctx context.Context) {
if !ok {
inProgress[sid] = struct{}{}
go func() {
- e.moveToDegraded(ctx, r.sh, r.errorCount, r.isMeta)
+ e.moveToDegraded(r.sh, r.errorCount, r.isMeta)
mtx.Lock()
delete(inProgress, sid)
@@ -84,7 +87,7 @@ func (e *StorageEngine) setModeLoop(ctx context.Context) {
}
}
-func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, errCount uint32, isMeta bool) {
+func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta bool) {
sid := sh.ID()
log := e.log.With(
zap.Stringer("shard_id", sid),
@@ -94,26 +97,28 @@ func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, err
defer e.mtx.RUnlock()
if isMeta {
- err := sh.SetMode(ctx, mode.DegradedReadOnly)
+ err := sh.SetMode(mode.DegradedReadOnly)
if err == nil {
- log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
+ log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
return
}
- log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
+ log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
zap.Error(err))
}
- err := sh.SetMode(ctx, mode.ReadOnly)
+ err := sh.SetMode(mode.ReadOnly)
if err != nil {
- log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
+ log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
return
}
- log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
+ log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
}
-// reportShardErrorByID increases shard error counter and logs an error.
-func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) {
+// reportShardErrorBackground increases shard error counter and logs an error.
+// It is intended to be used from background workers and
+// doesn't change shard mode because of possible deadlocks.
+func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) {
e.mtx.RLock()
sh, ok := e.shards[id]
e.mtx.RUnlock()
@@ -122,33 +127,50 @@ func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg
return
}
- e.reportShardError(ctx, sh, msg, err)
+ if isLogical(err) {
+ e.log.Warn(msg,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.String("error", err.Error()))
+ return
+ }
+
+ errCount := sh.errorCount.Add(1)
+ sh.Shard.IncErrorCounter()
+ e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err)
}
// reportShardError checks that the amount of errors doesn't exceed the configured threshold.
// If it does, shard is set to read-only mode.
func (e *StorageEngine) reportShardError(
- ctx context.Context,
sh hashedShard,
msg string,
err error,
fields ...zap.Field,
) {
if isLogical(err) {
- e.log.Warn(ctx, msg,
+ e.log.Warn(msg,
zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
+ zap.String("error", err.Error()))
return
}
errCount := sh.errorCount.Add(1)
- e.metrics.IncErrorCounter(sh.ID().String())
+ sh.Shard.IncErrorCounter()
+ e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err, fields...)
+}
+func (e *StorageEngine) reportShardErrorWithFlags(
+ sh *shard.Shard,
+ errCount uint32,
+ msg string,
+ err error,
+ fields ...zap.Field,
+) {
sid := sh.ID()
- e.log.Warn(ctx, msg, append([]zap.Field{
+ e.log.Warn(msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
- zap.Error(err),
+ zap.String("error", err.Error()),
}, fields...)...)
if e.errorsThreshold == 0 || errCount < e.errorsThreshold {
@@ -157,7 +179,7 @@ func (e *StorageEngine) reportShardError(
req := setModeRequest{
errorCount: errCount,
- sh: sh.Shard,
+ sh: sh,
isMeta: errors.As(err, new(metaerr.Error)),
}
@@ -166,17 +188,14 @@ func (e *StorageEngine) reportShardError(
default:
// For background workers we can have a lot of such errors,
// thus logging is done with DEBUG level.
- e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
+ e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
zap.Stringer("shard_id", sid),
zap.Uint32("error_count", errCount))
}
}
func isLogical(err error) bool {
- return errors.As(err, &logicerr.Logical{}) ||
- errors.Is(err, context.Canceled) ||
- errors.Is(err, context.DeadlineExceeded) ||
- errors.As(err, new(*apistatus.ResourceExhausted))
+ return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
}
// Option represents StorageEngine's constructor option.
@@ -189,6 +208,8 @@ type cfg struct {
metrics MetricRegister
+ shardPoolSize uint32
+
lowMem bool
containerSource atomic.Pointer[containerSource]
@@ -196,8 +217,8 @@ type cfg struct {
func defaultCfg() *cfg {
res := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
- metrics: noopMetrics{},
+ log: &logger.Logger{Logger: zap.L()},
+ shardPoolSize: 20,
}
res.containerSource.Store(&containerSource{})
return res
@@ -211,18 +232,13 @@ func New(opts ...Option) *StorageEngine {
opts[i](c)
}
- evLimMtx := &sync.RWMutex{}
- evLimCond := sync.NewCond(evLimMtx)
-
return &StorageEngine{
- cfg: c,
- shards: make(map[string]hashedShard),
- closeCh: make(chan struct{}),
- setModeCh: make(chan setModeRequest),
- evacuateLimiter: &evacuationLimiter{
- guard: evLimMtx,
- statusCond: evLimCond,
- },
+ cfg: c,
+ shards: make(map[string]hashedShard),
+ shardPools: make(map[string]util.WorkerPool),
+ closeCh: make(chan struct{}),
+ setModeCh: make(chan setModeRequest),
+ evacuateLimiter: &evacuationLimiter{},
}
}
@@ -239,6 +255,13 @@ func WithMetrics(v MetricRegister) Option {
}
}
+// WithShardPoolSize returns an option to specify the size of the worker pool for each shard.
+func WithShardPoolSize(sz uint32) Option {
+ return func(c *cfg) {
+ c.shardPoolSize = sz
+ }
+}
+
// WithErrorThreshold returns an option to specify the number of errors after which
// a shard is moved to read-only mode.
func WithErrorThreshold(sz uint32) Option {
@@ -274,7 +297,7 @@ func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (
return true, nil
}
- wasRemoved, err := container.WasRemoved(ctx, s.cs, id)
+ wasRemoved, err := container.WasRemoved(s.cs, id)
if err != nil {
return false, err
}
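One detail from reportShardErrorWithFlags above deserves a callout: once the per-shard error counter reaches errorsThreshold, the mode-change request is handed to setModeLoop with a non-blocking send, so duplicate requests are dropped while a transition is in flight. A hedged sketch of that hand-off in isolation; the helper name is illustrative.

// trySubmit mirrors the select/default send used above: it never blocks
// the error-reporting path; false means a request is already in flight.
func trySubmit(ch chan<- setModeRequest, req setModeRequest) bool {
	select {
	case ch <- req:
		return true
	default:
		return false // caller logs at DEBUG and moves on
	}
}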
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index fc6d9ee9c..525e17f34 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -2,111 +2,141 @@ package engine
import (
"context"
- "fmt"
"path/filepath"
- "runtime/debug"
- "strings"
- "sync"
+ "sync/atomic"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "git.frostfs.info/TrueCloudLab/hrw"
+ "github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
)
-type epochState struct {
- currEpoch uint64
-}
+type epochState struct{}
func (s epochState) CurrentEpoch() uint64 {
- return s.currEpoch
+ return 0
+}
+
+func BenchmarkExists(b *testing.B) {
+ b.Run("2 shards", func(b *testing.B) {
+ benchmarkExists(b, 2)
+ })
+ b.Run("4 shards", func(b *testing.B) {
+ benchmarkExists(b, 4)
+ })
+ b.Run("8 shards", func(b *testing.B) {
+ benchmarkExists(b, 8)
+ })
+}
+
+func benchmarkExists(b *testing.B, shardNum int) {
+ shards := make([]*shard.Shard, shardNum)
+ for i := range shardNum {
+ shards[i] = testNewShard(b)
+ }
+
+ e := testNewEngine(b).setInitializedShards(b, shards...).engine
+ defer func() { require.NoError(b, e.Close(context.Background())) }()
+
+ addr := oidtest.Address()
+ for range 100 {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ err := Put(context.Background(), e, obj)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ var shPrm shard.ExistsPrm
+ shPrm.Address = addr
+ shPrm.ParentAddress = oid.Address{}
+ ok, _, err := e.exists(context.Background(), shPrm)
+ if err != nil || ok {
+ b.Fatalf("%t %v", ok, err)
+ }
+ }
}
type testEngineWrapper struct {
engine *StorageEngine
- shards []*shard.Shard
shardIDs []*shard.ID
}
func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
- opts = append(testGetDefaultEngineOptions(t), opts...)
- return &testEngineWrapper{engine: New(opts...)}
+ engine := New(WithLogger(test.NewLogger(t)))
+ for _, opt := range opts {
+ opt(engine.cfg)
+ }
+ return &testEngineWrapper{
+ engine: engine,
+ }
+}
+
+func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
+ for _, s := range shards {
+ pool, err := ants.NewPool(10, ants.WithNonblocking(true))
+ require.NoError(t, err)
+
+ te.engine.shards[s.ID().String()] = hashedShard{
+ shardWrapper: shardWrapper{
+ errorCount: new(atomic.Uint32),
+ Shard: s,
+ },
+ hash: hrw.StringHash(s.ID().String()),
+ }
+ te.engine.shardPools[s.ID().String()] = pool
+ te.shardIDs = append(te.shardIDs, s.ID())
+ }
+ return te
}
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
- return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
- return testGetDefaultShardOptions(t)
- })
+ shards := make([]*shard.Shard, 0, num)
+
+ for range num {
+ shards = append(shards, testNewShard(t))
+ }
+
+ return te.setInitializedShards(t, shards...)
}
-func (te *testEngineWrapper) setShardsNumOpts(
- t testing.TB, num int, shardOpts func(id int) []shard.Option,
-) *testEngineWrapper {
- te.shards = make([]*shard.Shard, num)
- te.shardIDs = make([]*shard.ID, num)
+func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
for i := range num {
- shard, err := te.engine.createShard(context.Background(), shardOpts(i))
+ opts := shardOpts(i)
+ id, err := te.engine.AddShard(context.Background(), opts...)
require.NoError(t, err)
- require.NoError(t, te.engine.addShard(shard))
- te.shards[i] = shard
- te.shardIDs[i] = shard.ID()
+ te.shardIDs = append(te.shardIDs, id)
}
- require.Len(t, te.engine.shards, num)
return te
}
-func (te *testEngineWrapper) setShardsNumAdditionalOpts(
- t testing.TB, num int, shardOpts func(id int) []shard.Option,
-) *testEngineWrapper {
- return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
- return append(testGetDefaultShardOptions(t), shardOpts(id)...)
- })
-}
-
-// prepare calls Open and Init on the created engine.
-func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
- require.NoError(t, te.engine.Open(context.Background()))
- require.NoError(t, te.engine.Init(context.Background()))
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+ for i := range num {
+ defaultOpts := testDefaultShardOptions(t)
+ opts := append(defaultOpts, shardOpts(i)...)
+ id, err := te.engine.AddShard(context.Background(), opts...)
+ require.NoError(t, err)
+ te.shardIDs = append(te.shardIDs, id)
+ }
return te
}
-func testGetDefaultEngineOptions(t testing.TB) []Option {
- return []Option{
- WithLogger(test.NewLogger(t)),
- }
-}
-
-func testGetDefaultShardOptions(t testing.TB) []shard.Option {
- return []shard.Option{
- shard.WithLogger(test.NewLogger(t)),
- shard.WithBlobStorOptions(
- blobstor.WithStorages(
- newStorages(t, t.TempDir(), 1<<20)),
- blobstor.WithLogger(test.NewLogger(t)),
- ),
- shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
- shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...),
- shard.WithLimiter(&testQoSLimiter{t: t}),
- }
-}
-
-func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option {
- return []meta.Option{
- meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
- meta.WithPermissions(0o700),
- meta.WithEpochState(epochState{}),
- meta.WithLogger(test.NewLogger(t)),
- }
-}
-
func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
return []blobstor.SubStorage{
{
@@ -116,8 +146,7 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1),
blobovniczatree.WithPermissions(0o700),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))),
+ blobovniczatree.WithLogger(test.NewLogger(t))),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < smallSize
},
@@ -158,77 +187,33 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
}, smallFileStorage, largeFileStorage
}
-var _ qos.Limiter = (*testQoSLimiter)(nil)
+func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard {
+ sid, err := generateShardID()
+ require.NoError(t, err)
-type testQoSLimiter struct {
- t testing.TB
- quard sync.Mutex
- id int64
- readStacks map[int64][]byte
- writeStacks map[int64][]byte
+ shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...)
+ s := shard.New(append(shardOpts, opts...)...)
+
+ require.NoError(t, s.Open(context.Background()))
+ require.NoError(t, s.Init(context.Background()))
+
+ return s
}
-func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
-
-func (t *testQoSLimiter) Close() {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- var sb strings.Builder
- var seqN int
- for _, stack := range t.readStacks {
- seqN++
- sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
+func testDefaultShardOptions(t testing.TB) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t)),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages(
+ newStorages(t, t.TempDir(), 1<<20)),
+ blobstor.WithLogger(test.NewLogger(t)),
+ ),
+ shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+ meta.WithPermissions(0o700),
+ meta.WithEpochState(epochState{}),
+ meta.WithLogger(test.NewLogger(t)),
+ ),
}
- for _, stack := range t.writeStacks {
- seqN++
- sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
- }
- require.True(t.t, seqN == 0, sb.String())
}
-
-func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- stack := debug.Stack()
-
- t.id++
- id := t.id
-
- if t.readStacks == nil {
- t.readStacks = make(map[int64][]byte)
- }
- t.readStacks[id] = stack
-
- return func() {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- delete(t.readStacks, id)
- }, nil
-}
-
-func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- stack := debug.Stack()
-
- t.id++
- id := t.id
-
- if t.writeStacks == nil {
- t.writeStacks = make(map[int64][]byte)
- }
- t.writeStacks[id] = stack
-
- return func() {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- delete(t.writeStacks, id)
- }, nil
-}
-
-func (t *testQoSLimiter) SetParentID(string) {}
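
The helpers above register each shard together with an HRW hash of its ID (hrw.StringHash), which is what the engine later feeds into rendezvous sorting when it picks a shard for an address. A minimal sketch of that selection, assuming only the Hash() uint64 accessor that hrw.SortHasherSliceByValue requires (shardRef and the sample IDs are illustrative):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/hrw"
)

// shardRef stands in for hashedShard: it carries the precomputed HRW hash
// of the shard ID, exactly as setInitializedShards stores it above.
type shardRef struct {
	id   string
	hash uint64
}

// Hash satisfies the hasher constraint expected by SortHasherSliceByValue.
func (s shardRef) Hash() uint64 { return s.hash }

func main() {
	shards := []shardRef{
		{id: "shard-1", hash: hrw.StringHash("shard-1")},
		{id: "shard-2", hash: hrw.StringHash("shard-2")},
		{id: "shard-3", hash: hrw.StringHash("shard-3")},
	}
	// Sort by rendezvous weight for the object address: the head of the
	// slice is the preferred placement, mirroring tryEvacuateObjectLocal.
	hrw.SortHasherSliceByValue(shards, hrw.StringHash("object-address"))
	fmt.Println("preferred shard:", shards[0].id)
}
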
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index 57029dd5f..535435ceb 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -46,6 +46,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
var testShards [2]*testShard
te := testNewEngine(t,
+ WithShardPoolSize(1),
WithErrorThreshold(errThreshold),
).
setShardsNumOpts(t, 2, func(id int) []shard.Option {
@@ -66,8 +67,10 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
pilorama.WithPerm(0o700)),
}
- }).prepare(t)
+ })
e := te.engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
for i, id := range te.shardIDs {
testShards[i].id = id
@@ -148,17 +151,17 @@ func TestErrorReporting(t *testing.T) {
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- for i := range uint32(2) {
+ for i := uint32(0); i < 2; i++ {
_, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, false))
+ require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, false))
checkShardState(t, te.ng, te.shards[0].id, errThreshold+1, mode.ReadWrite)
- require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, true))
+ require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true))
checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
require.NoError(t, te.ng.Close(context.Background()))
})
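
With the prepare helper removed, every test now drives the engine lifecycle explicitly: construct, Open, Init, and Close on teardown. A condensed sketch of the pattern these hunks repeat (testNewEngine and setShardsNum are the helpers defined above; the wrapper function itself is illustrative):

func newOpenedTestEngine(t testing.TB) *StorageEngine {
	te := testNewEngine(t, WithShardPoolSize(1)).setShardsNum(t, 2)
	e := te.engine
	require.NoError(t, e.Open(context.Background()))
	require.NoError(t, e.Init(context.Background()))
	t.Cleanup(func() { require.NoError(t, e.Close(context.Background())) })
	return e
}
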
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index c08dfbf03..7bef6edfb 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -4,20 +4,19 @@ import (
"context"
"errors"
"fmt"
- "slices"
"strings"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -25,16 +24,6 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
- "golang.org/x/sync/errgroup"
-)
-
-const (
- // containerWorkerCountDefault is a default value of the count of
- // concurrent container evacuation workers.
- containerWorkerCountDefault = 10
- // objectWorkerCountDefault is a default value of the count of
- // concurrent object evacuation workers.
- objectWorkerCountDefault = 10
)
var (
@@ -55,6 +44,9 @@ func (s EvacuateScope) String() string {
var sb strings.Builder
first := true
if s&EvacuateScopeObjects == EvacuateScopeObjects {
+ if !first {
+ sb.WriteString(";")
+ }
sb.WriteString("objects")
first = false
}
@@ -85,11 +77,8 @@ type EvacuateShardPrm struct {
ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error)
TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error)
IgnoreErrors bool
+ Async bool
Scope EvacuateScope
- RepOneOnly bool
-
- ContainerWorkerCount uint32
- ObjectWorkerCount uint32
}
// EvacuateShardRes represents result of the EvacuateShard operation.
@@ -200,14 +189,21 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
return res
}
+const defaultEvacuateBatchSize = 100
+
+type pooledShard struct {
+ hashedShard
+ pool util.WorkerPool
+}
+
var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
// Evacuate moves data from one shard to the others.
// The shard being moved must be in read-only mode.
-func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error {
+func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) {
select {
case <-ctx.Done():
- return ctx.Err()
+ return nil, ctx.Err()
default:
}
@@ -219,6 +215,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate",
trace.WithAttributes(
attribute.StringSlice("shardIDs", shardIDs),
+ attribute.Bool("async", prm.Async),
attribute.Bool("ignoreErrors", prm.IgnoreErrors),
attribute.Stringer("scope", prm.Scope),
))
@@ -226,7 +223,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
shards, err := e.getActualShards(shardIDs, prm)
if err != nil {
- return err
+ return nil, err
}
shardsToEvacuate := make(map[string]*shard.Shard)
@@ -239,36 +236,40 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
}
res := NewEvacuateShardRes()
- ctx = context.WithoutCancel(ctx)
- eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
+ ctx = ctxOrBackground(ctx, prm.Async)
+ eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
if err != nil {
- return err
+ return nil, err
}
- var mtx sync.RWMutex
- copyShards := func() []hashedShard {
- mtx.RLock()
- defer mtx.RUnlock()
- t := slices.Clone(shards)
- return t
- }
eg.Go(func() error {
- return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate)
+ return e.evacuateShards(egCtx, shardIDs, prm, res, shards, shardsToEvacuate)
})
- return nil
+ if prm.Async {
+ return nil, nil
+ }
+
+ return res, eg.Wait()
+}
+
+func ctxOrBackground(ctx context.Context, background bool) context.Context {
+ if background {
+ return context.Background()
+ }
+ return ctx
}
func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
var err error
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
trace.WithAttributes(
attribute.StringSlice("shardIDs", shardIDs),
+ attribute.Bool("async", prm.Async),
attribute.Bool("ignoreErrors", prm.IgnoreErrors),
attribute.Stringer("scope", prm.Scope),
- attribute.Bool("repOneOnly", prm.RepOneOnly),
))
defer func() {
@@ -276,51 +277,25 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
e.evacuateLimiter.Complete(err)
}()
- e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.Stringer("scope", prm.Scope))
+ e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
err = e.getTotals(ctx, prm, shardsToEvacuate, res)
if err != nil {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
- zap.Stringer("scope", prm.Scope))
+ e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
return err
}
- ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm)
- continueLoop := true
- for i := 0; continueLoop && i < len(shardIDs); i++ {
- select {
- case <-ctx.Done():
- continueLoop = false
- default:
- egShard.Go(func() error {
- err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject)
- if err != nil {
- cancel(err)
- }
- return err
- })
+ for _, shardID := range shardIDs {
+ if err = e.evacuateShard(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
+ e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
+ return err
}
}
- err = egShard.Wait()
- if err != nil {
- err = fmt.Errorf("shard error: %w", err)
- }
- errContainer := egContainer.Wait()
- errObject := egObject.Wait()
- if errContainer != nil {
- err = errors.Join(err, fmt.Errorf("container error: %w", errContainer))
- }
- if errObject != nil {
- err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
- }
- if err != nil {
- e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.Stringer("scope", prm.Scope))
- return err
- }
- e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation,
+ e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
zap.Strings("shard_ids", shardIDs),
evacuationOperationLogField,
zap.Uint64("total_objects", res.ObjectsTotal()),
@@ -334,27 +309,6 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
return nil
}
-func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) (
- context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group,
-) {
- operationCtx, cancel := context.WithCancelCause(ctx)
- egObject, _ := errgroup.WithContext(operationCtx)
- objectWorkerCount := prm.ObjectWorkerCount
- if objectWorkerCount == 0 {
- objectWorkerCount = objectWorkerCountDefault
- }
- egObject.SetLimit(int(objectWorkerCount))
- egContainer, _ := errgroup.WithContext(operationCtx)
- containerWorkerCount := prm.ContainerWorkerCount
- if containerWorkerCount == 0 {
- containerWorkerCount = containerWorkerCountDefault
- }
- egContainer.SetLimit(int(containerWorkerCount))
- egShard, _ := errgroup.WithContext(operationCtx)
-
- return operationCtx, cancel, egShard, egContainer, egObject
-}
-
func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals")
defer span.End()
@@ -381,9 +335,8 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha
return nil
}
-func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
- egContainer *errgroup.Group, egObject *errgroup.Group,
+func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
trace.WithAttributes(
@@ -392,10 +345,11 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.Cancel
defer span.End()
if prm.Scope.WithObjects() {
- if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil {
+ if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
return err
}
}
+
if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() {
if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
return err
@@ -405,84 +359,44 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.Cancel
return nil
}
-func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
- egContainer *errgroup.Group, egObject *errgroup.Group,
+func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
- sh := shardsToEvacuate[shardID]
- var cntPrm shard.IterateOverContainersPrm
- cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error {
- select {
- case <-ctx.Done():
- return context.Cause(ctx)
- default:
- }
- egContainer.Go(func() error {
- var skip bool
- c, err := e.containerSource.Load().cs.Get(ctx, cnt)
- if err != nil {
- if client.IsErrContainerNotFound(err) {
- skip = true
- } else {
- return err
- }
- }
- if !skip && prm.RepOneOnly {
- skip = e.isNotRepOne(c)
- }
- if skip {
- countPrm := shard.CountAliveObjectsInContainerPrm{
- ObjectType: objType,
- ContainerID: cnt,
- }
- count, err := sh.CountAliveObjectsInContainer(ctx, countPrm)
- if err != nil {
- return err
- }
- res.objSkipped.Add(count)
- return nil
- }
- var objPrm shard.IterateOverObjectsInContainerPrm
- objPrm.ObjectType = objType
- objPrm.ContainerID = cnt
- objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
- select {
- case <-ctx.Done():
- return context.Cause(ctx)
- default:
- }
- egObject.Go(func() error {
- err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value)
- if err != nil {
- cancel(err)
- }
- return err
- })
- return nil
- }
- err = sh.IterateOverObjectsInContainer(ctx, objPrm)
- if err != nil {
- cancel(err)
- }
- return err
- })
- return nil
- }
+ var listPrm shard.ListWithCursorPrm
+ listPrm.WithCount(defaultEvacuateBatchSize)
+ sh := shardsToEvacuate[shardID]
sh.SetEvacuationInProgress(true)
- err := sh.IterateOverContainers(ctx, cntPrm)
- if err != nil {
- cancel(err)
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField)
+
+ var c *meta.Cursor
+ for {
+ listPrm.WithCursor(c)
+
+ // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
+ // because ListWithCursor works only with the metabase.
+ listRes, err := sh.ListWithCursor(ctx, listPrm)
+ if err != nil {
+ if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
+ break
+ }
+ e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
+
+ if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, shardsToEvacuate); err != nil {
+ return err
+ }
+
+ c = listRes.Cursor()
}
- return err
+ return nil
}
func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
sh := shardsToEvacuate[shardID]
- shards := getShards()
var listPrm pilorama.TreeListTreesPrm
first := true
@@ -509,7 +423,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string,
}
func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID,
- prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees",
trace.WithAttributes(
@@ -529,39 +443,39 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
return err
}
if success {
- e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal,
+ e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedLocal,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID),
- evacuationOperationLogField)
+ evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
res.trEvacuated.Add(1)
continue
}
moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm)
if err != nil {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
+ e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err))
+ zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}
if moved {
- e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote,
+ e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedRemote,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID),
zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK),
- evacuationOperationLogField)
+ evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
res.trEvacuated.Add(1)
} else if prm.IgnoreErrors {
res.trFailed.Add(1)
- e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
+ e.log.Warn(logs.EngineShardsEvacuationFailedToMoveTree,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err))
+ zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
} else {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
+ e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err))
+ zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID)
}
}
@@ -570,14 +484,14 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
if prm.TreeHandler == nil {
- return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
+ return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
}
return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)
}
func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID,
- prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) (bool, string, error) {
target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate)
if err != nil {
@@ -647,15 +561,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar
// findShardToEvacuateTree returns the first shard according to HRW or the first shard where the tree exists.
func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
- shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) (hashedShard, bool, error) {
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+) (pooledShard, bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
- var result hashedShard
+ var result pooledShard
var found bool
for _, target := range shards {
select {
case <-ctx.Done():
- return hashedShard{}, false, ctx.Err()
+ return pooledShard{}, false, ctx.Err()
default:
}
@@ -683,7 +597,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora
return result, found, nil
}
-func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) {
+func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) {
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -713,85 +627,84 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
// We must have all shards, to have correct information about their
// indexes in a sorted slice and set appropriate marks in the metabase.
// Evacuated shard is skipped during put.
- shards := make([]hashedShard, 0, len(e.shards))
+ shards := make([]pooledShard, 0, len(e.shards))
for id := range e.shards {
- shards = append(shards, e.shards[id])
+ shards = append(shards, pooledShard{
+ hashedShard: hashedShard(e.shards[id]),
+ pool: e.shardPools[id],
+ })
}
return shards, nil
}
-func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
- getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
+func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects",
+ trace.WithAttributes(
+ attribute.Int("objects_count", len(toEvacuate)),
+ ))
defer span.End()
- select {
- case <-ctx.Done():
- return context.Cause(ctx)
- default:
- }
-
- shards := getShards()
- addr := objInfo.Address
-
- var getPrm shard.GetPrm
- getPrm.SetAddress(addr)
- getPrm.SkipEvacCheck(true)
-
- getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm)
- if err != nil {
- if prm.IgnoreErrors {
- res.objFailed.Add(1)
- return nil
+ for i := range toEvacuate {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
}
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
- return err
- }
+ addr := toEvacuate[i].Address
- evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr)
- if err != nil {
- return err
- }
+ var getPrm shard.GetPrm
+ getPrm.SetAddress(addr)
+ getPrm.SkipEvacCheck(true)
- if evacuatedLocal {
- return nil
- }
+ getRes, err := sh.Get(ctx, getPrm)
+ if err != nil {
+ if prm.IgnoreErrors {
+ res.objFailed.Add(1)
+ continue
+ }
+ e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
- if prm.ObjectsHandler == nil {
- // Do not check ignoreErrors flag here because
- // ignoring errors on put make this command kinda useless.
- return fmt.Errorf("%w: %s", errPutShard, objInfo)
- }
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, shardsToEvacuate, res)
+ if err != nil {
+ return err
+ }
- moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
- if err != nil {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
- return err
- }
- if moved {
- res.objEvacuated.Add(1)
- } else if prm.IgnoreErrors {
- res.objFailed.Add(1)
- e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
- } else {
- return fmt.Errorf("object %s was not replicated", addr)
+ if evacuatedLocal {
+ continue
+ }
+
+ if prm.ObjectsHandler == nil {
+ // Do not check ignoreErrors flag here because
+ // ignoring errors on put makes this command kinda useless.
+ return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i])
+ }
+
+ moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
+ if err != nil {
+ e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
+ if moved {
+ res.objEvacuated.Add(1)
+ } else if prm.IgnoreErrors {
+ res.objFailed.Add(1)
+ e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ } else {
+ return fmt.Errorf("object %s was not replicated", addr)
+ }
}
return nil
}
-func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
- p := c.Value.PlacementPolicy()
- for i := range p.NumberOfReplicas() {
- if p.ReplicaDescriptor(i).NumberOfObjects() > 1 {
- return true
- }
- }
- return false
-}
-
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
- shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
) (bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
for j := range shards {
@@ -804,14 +717,15 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
continue
}
- switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status {
+ switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object).status {
case putToShardSuccess:
res.objEvacuated.Add(1)
- e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
+ e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
zap.Stringer("from", sh.ID()),
zap.Stringer("to", shards[j].ID()),
zap.Stringer("addr", addr),
- evacuationOperationLogField)
+ evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return true, nil
case putToShardExists, putToShardRemoved:
res.objSkipped.Add(1)
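
After this change Evacuate is synchronous by default and hands the counters back to the caller; with Async set it switches to a background context via ctxOrBackground, returns immediately, and progress is read through GetEvacuationState. A hedged usage sketch (EvacuateShardPrm, EvacuateScopeObjects, and ObjectsEvacuated come from this file; the wrapper function and the print statement are illustrative):

func runEvacuation(ctx context.Context, e *StorageEngine, ids []*shard.ID, async bool) error {
	var prm EvacuateShardPrm
	prm.ShardID = ids[0:1]
	prm.Scope = EvacuateScopeObjects
	prm.Async = async
	// prm.ObjectsHandler / prm.TreeHandler would be set here when data
	// must be replicated to remote nodes rather than spare local shards.

	res, err := e.Evacuate(ctx, prm)
	if err != nil {
		return err
	}
	if async {
		// res is nil in async mode; poll e.GetEvacuationState(ctx) until
		// the state reports EvacuateProcessStateCompleted.
		return nil
	}
	fmt.Printf("evacuated %d objects\n", res.ObjectsEvacuated())
	return nil
}
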
diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go
index b75e8686d..1e6b9ccb1 100644
--- a/pkg/local_object_storage/engine/evacuate_limiter.go
+++ b/pkg/local_object_storage/engine/evacuate_limiter.go
@@ -3,7 +3,6 @@ package engine
import (
"context"
"fmt"
- "slices"
"sync"
"time"
@@ -95,7 +94,8 @@ func (s *EvacuationState) StartedAt() *time.Time {
if s == nil {
return nil
}
- if s.startedAt.IsZero() {
+ defaultTime := time.Time{}
+ if s.startedAt == defaultTime {
return nil
}
return &s.startedAt
@@ -105,7 +105,8 @@ func (s *EvacuationState) FinishedAt() *time.Time {
if s == nil {
return nil
}
- if s.finishedAt.IsZero() {
+ defaultTime := time.Time{}
+ if s.finishedAt == defaultTime {
return nil
}
return &s.finishedAt
@@ -122,7 +123,8 @@ func (s *EvacuationState) DeepCopy() *EvacuationState {
if s == nil {
return nil
}
- shardIDs := slices.Clone(s.shardIDs)
+ shardIDs := make([]string, len(s.shardIDs))
+ copy(shardIDs, s.shardIDs)
return &EvacuationState{
shardIDs: shardIDs,
@@ -139,8 +141,7 @@ type evacuationLimiter struct {
eg *errgroup.Group
cancel context.CancelFunc
- guard *sync.RWMutex
- statusCond *sync.Cond // used in unit tests
+ guard sync.RWMutex
}
func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) {
@@ -166,7 +167,6 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res
startedAt: time.Now().UTC(),
result: result,
}
- l.statusCond.Broadcast()
return l.eg, egCtx, nil
}
@@ -182,7 +182,6 @@ func (l *evacuationLimiter) Complete(err error) {
l.state.processState = EvacuateProcessStateCompleted
l.state.errMessage = errMsq
l.state.finishedAt = time.Now().UTC()
- l.statusCond.Broadcast()
l.eg = nil
}
@@ -217,7 +216,6 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error {
l.state = EvacuationState{}
l.eg = nil
l.cancel = nil
- l.statusCond.Broadcast()
return nil
}
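
evacuationLimiter is the single-flight gate behind the behaviour tested below: TryStart rejects a second evacuation while one is in progress, Complete records the outcome, and with statusCond gone completion can only be observed by polling GetEvacuationState. The same gate, stripped to its core (a sketch of the pattern, not the repository type):

type singleFlight struct {
	guard   sync.Mutex
	running bool
}

func (s *singleFlight) TryStart() error {
	s.guard.Lock()
	defer s.guard.Unlock()
	if s.running {
		return errors.New("evacuate is already running")
	}
	s.running = true
	return nil
}

func (s *singleFlight) Complete() {
	s.guard.Lock()
	defer s.guard.Unlock()
	s.running = false
}
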
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index f2ba7d994..28529fab9 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -6,12 +6,9 @@ import (
"fmt"
"path/filepath"
"strconv"
- "sync"
- "sync/atomic"
"testing"
"time"
- coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
@@ -21,38 +18,14 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
-type containerStorage struct {
- cntmap map[cid.ID]*container.Container
- latency time.Duration
-}
-
-func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) {
- time.Sleep(cs.latency)
- v, ok := cs.cntmap[id]
- if !ok {
- return nil, new(apistatus.ContainerNotFound)
- }
- coreCnt := coreContainer.Container{
- Value: *v,
- }
- return &coreCnt, nil
-}
-
-func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
- return nil, nil
-}
-
func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
dir := t.TempDir()
@@ -75,9 +48,10 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
pilorama.WithPerm(0o700),
),
}
- }).
- prepare(t)
+ })
e, ids := te.engine, te.shardIDs
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
treeID := "version"
@@ -85,15 +59,10 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
{Key: pilorama.AttributeVersion, Value: []byte("XXX")},
{Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
}
- cnrMap := make(map[cid.ID]*container.Container)
- for _, sh := range ids {
- for i := range objPerShard {
- // Create dummy container
- cnr1 := container.Container{}
- cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i))
- contID := cidtest.ID()
- cnrMap[contID] = &cnr1
+ for _, sh := range ids {
+ for range objPerShard {
+ contID := cidtest.ID()
obj := testutil.GenerateObjectWithCID(contID)
objects = append(objects, obj)
@@ -107,7 +76,6 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
require.NoError(t, err)
}
}
- e.SetContainerSource(&containerStorage{cntmap: cnrMap})
return e, ids, objects
}
@@ -140,17 +108,16 @@ func TestEvacuateShardObjects(t *testing.T) {
prm.Scope = EvacuateScopeObjects
t.Run("must be read-only", func(t *testing.T) {
- err := e.Evacuate(context.Background(), prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, ErrMustBeReadOnly)
+ require.Equal(t, uint64(0), res.ObjectsEvacuated())
})
- require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
- err := e.Evacuate(context.Background(), prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.NoError(t, err)
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated())
+ require.Equal(t, uint64(objPerShard), res.ObjectsEvacuated())
// We check that all objects are available both before and after shard removal.
// First case is a real-world use-case. It ensures that an object can be put in presence
@@ -187,46 +154,33 @@ func TestEvacuateShardObjects(t *testing.T) {
}
// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st = testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, uint64(0), st.ObjectsEvacuated())
+ res, err = e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), res.ObjectsEvacuated())
checkHasObjects(t)
e.mtx.Lock()
delete(e.shards, evacuateShardID)
+ delete(e.shardPools, evacuateShardID)
e.mtx.Unlock()
checkHasObjects(t)
}
-func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState {
- var st *EvacuationState
- var err error
- e.evacuateLimiter.waitForCompleted()
- st, err = e.GetEvacuationState(context.Background())
- require.NoError(t, err)
- require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
- return st
-}
-
func TestEvacuateObjectsNetwork(t *testing.T) {
t.Parallel()
errReplication := errors.New("handler error")
acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
- var n atomic.Uint64
- var mtx sync.Mutex
+ var n uint64
return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- mtx.Lock()
- defer mtx.Unlock()
- if n.Load() == max {
+ if n == max {
return false, errReplication
}
- n.Add(1)
+ n++
for i := range objects {
if addr == objectCore.AddressOf(objects[i]) {
require.Equal(t, objects[i], obj)
@@ -247,21 +201,21 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
evacuateShardID := ids[0].String()
- require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[0:1]
prm.Scope = EvacuateScopeObjects
- err := e.Evacuate(context.Background(), prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errMustHaveTwoShards)
+ require.Equal(t, uint64(0), res.ObjectsEvacuated())
prm.ObjectsHandler = acceptOneOf(objects, 2)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), errReplication.Error())
- require.Equal(t, uint64(2), st.ObjectsEvacuated())
+ res, err = e.Evacuate(context.Background(), prm)
+ require.ErrorIs(t, err, errReplication)
+ require.Equal(t, uint64(2), res.ObjectsEvacuated())
})
t.Run("multiple shards, evacuate one", func(t *testing.T) {
t.Parallel()
@@ -270,26 +224,24 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
prm.ObjectsHandler = acceptOneOf(objects, 2)
prm.Scope = EvacuateScopeObjects
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), errReplication.Error())
- require.Equal(t, uint64(2), st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.ErrorIs(t, err, errReplication)
+ require.Equal(t, uint64(2), res.ObjectsEvacuated())
t.Run("no errors", func(t *testing.T) {
prm.ObjectsHandler = acceptOneOf(objects, 3)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, uint64(3), st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), res.ObjectsEvacuated())
})
})
t.Run("multiple shards, evacuate many", func(t *testing.T) {
@@ -310,7 +262,7 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
}
for i := range ids {
- require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[i].String()].SetMode(mode.ReadOnly))
}
var prm EvacuateShardPrm
@@ -318,18 +270,16 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
prm.ObjectsHandler = acceptOneOf(objects, totalCount-1)
prm.Scope = EvacuateScopeObjects
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), errReplication.Error())
- require.Equal(t, totalCount-1, st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.ErrorIs(t, err, errReplication)
+ require.Equal(t, totalCount-1, res.ObjectsEvacuated())
t.Run("no errors", func(t *testing.T) {
prm.ObjectsHandler = acceptOneOf(objects, totalCount)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, totalCount, st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, totalCount, res.ObjectsEvacuated())
})
})
}
@@ -341,8 +291,8 @@ func TestEvacuateCancellation(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
@@ -359,39 +309,9 @@ func TestEvacuateCancellation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
- err := e.Evacuate(ctx, prm)
+ res, err := e.Evacuate(ctx, prm)
require.ErrorContains(t, err, "context canceled")
-}
-
-func TestEvacuateCancellationByError(t *testing.T) {
- t.Parallel()
- e, ids, _ := newEngineEvacuate(t, 2, 10)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
-
- var prm EvacuateShardPrm
- prm.ShardID = ids[1:2]
- var once atomic.Bool
- prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
- var err error
- flag := true
- if once.CompareAndSwap(false, true) {
- err = errors.New("test error")
- flag = false
- }
- return flag, err
- }
- prm.Scope = EvacuateScopeObjects
- prm.ObjectWorkerCount = 2
- prm.ContainerWorkerCount = 2
-
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), "test error")
+ require.Equal(t, uint64(0), res.ObjectsEvacuated())
}
func TestEvacuateSingleProcess(t *testing.T) {
@@ -400,11 +320,11 @@ func TestEvacuateSingleProcess(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
- blocker := make(chan any)
- running := make(chan any)
+ blocker := make(chan interface{})
+ running := make(chan interface{})
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
@@ -421,19 +341,20 @@ func TestEvacuateSingleProcess(t *testing.T) {
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
+ res, err := e.Evacuate(egCtx, prm)
+ require.NoError(t, err, "first evacuation failed")
+ require.Equal(t, uint64(3), res.ObjectsEvacuated())
return nil
})
eg.Go(func() error {
<-running
- require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed")
+ res, err := e.Evacuate(egCtx, prm)
+ require.ErrorContains(t, err, "evacuate is already running for shard ids", "second evacuation must fail")
+ require.Equal(t, uint64(0), res.ObjectsEvacuated())
close(blocker)
return nil
})
require.NoError(t, eg.Wait())
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, uint64(3), st.ObjectsEvacuated())
- require.Equal(t, st.ErrorMessage(), "")
}
func TestEvacuateObjectsAsync(t *testing.T) {
@@ -442,11 +363,11 @@ func TestEvacuateObjectsAsync(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
- blocker := make(chan any)
- running := make(chan any)
+ blocker := make(chan interface{})
+ running := make(chan interface{})
var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
@@ -472,9 +393,9 @@ func TestEvacuateObjectsAsync(t *testing.T) {
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
+ res, err := e.Evacuate(egCtx, prm)
+ require.NoError(t, err, "first evacuation failed")
+ require.Equal(t, uint64(3), res.ObjectsEvacuated())
return nil
})
@@ -497,7 +418,12 @@ func TestEvacuateObjectsAsync(t *testing.T) {
close(blocker)
- st = testWaitForEvacuationCompleted(t, e)
+ require.Eventually(t, func() bool {
+ st, err = e.GetEvacuationState(context.Background())
+ return st.ProcessingStatus() == EvacuateProcessStateCompleted
+ }, 3*time.Second, 10*time.Millisecond, "invalid final state")
+
+ require.NoError(t, err, "get final state failed")
require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
require.NotNil(t, st.StartedAt(), "invalid final started at")
require.NotNil(t, st.FinishedAt(), "invalid final finished at")
@@ -523,7 +449,7 @@ func TestEvacuateTreesLocal(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
var prm EvacuateShardPrm
prm.ShardID = ids[0:1]
@@ -543,9 +469,14 @@ func TestEvacuateTreesLocal(t *testing.T) {
require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
- require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NotNil(t, res, "sync evacuation result must be not nil")
+ require.NoError(t, err, "evacuation failed")
+
+ st, err = e.GetEvacuationState(context.Background())
+ require.NoError(t, err, "get evacuation state failed")
+ require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
- st = testWaitForEvacuationCompleted(t, e)
require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count")
require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count")
require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
@@ -597,10 +528,9 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.NoError(t, e.Close(context.Background()))
}()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
- mutex := sync.Mutex{}
evacuatedTreeOps := make(map[string][]*pilorama.Move)
var prm EvacuateShardPrm
prm.ShardID = ids
@@ -615,9 +545,7 @@ func TestEvacuateTreesRemote(t *testing.T) {
if op.Time == 0 {
return true, "", nil
}
- mutex.Lock()
evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
- mutex.Unlock()
height = op.Time + 1
}
}
@@ -636,9 +564,15 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
- require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
- st = testWaitForEvacuationCompleted(t, e)
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NotNil(t, res, "sync evacuation must return not nil")
+ require.NoError(t, err, "evacuation failed")
+ st, err = e.GetEvacuationState(context.Background())
+ require.NoError(t, err, "get evacuation state failed")
+ require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
+
+ require.NoError(t, err, "get final state failed")
require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count")
require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count")
require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
@@ -671,157 +605,3 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.Equal(t, expectedTreeOps, evacuatedTreeOps)
}
-
-func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
- e, ids, _ := newEngineEvacuate(t, 2, 0)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- // Create container with policy REP 2
- cnr1 := container.Container{}
- p1 := netmap.PlacementPolicy{}
- p1.SetContainerBackupFactor(1)
- x1 := netmap.ReplicaDescriptor{}
- x1.SetNumberOfObjects(2)
- p1.AddReplicas(x1)
- x1 = netmap.ReplicaDescriptor{}
- x1.SetNumberOfObjects(1)
- p1.AddReplicas(x1)
- cnr1.SetPlacementPolicy(p1)
- cnr1.SetAttribute("cnr", "cnr1")
-
- var idCnr1 cid.ID
- container.CalculateID(&idCnr1, cnr1)
-
- cnrmap := make(map[cid.ID]*container.Container)
- var cids []cid.ID
- cnrmap[idCnr1] = &cnr1
- cids = append(cids, idCnr1)
-
- // Create container with policy REP 1
- cnr2 := container.Container{}
- p2 := netmap.PlacementPolicy{}
- p2.SetContainerBackupFactor(1)
- x2 := netmap.ReplicaDescriptor{}
- x2.SetNumberOfObjects(1)
- p2.AddReplicas(x2)
- x2 = netmap.ReplicaDescriptor{}
- x2.SetNumberOfObjects(1)
- p2.AddReplicas(x2)
- cnr2.SetPlacementPolicy(p2)
- cnr2.SetAttribute("cnr", "cnr2")
-
- var idCnr2 cid.ID
- container.CalculateID(&idCnr2, cnr2)
- cnrmap[idCnr2] = &cnr2
- cids = append(cids, idCnr2)
-
- // Create container for simulate removing
- cnr3 := container.Container{}
- p3 := netmap.PlacementPolicy{}
- p3.SetContainerBackupFactor(1)
- x3 := netmap.ReplicaDescriptor{}
- x3.SetNumberOfObjects(1)
- p3.AddReplicas(x3)
- cnr3.SetPlacementPolicy(p3)
- cnr3.SetAttribute("cnr", "cnr3")
-
- var idCnr3 cid.ID
- container.CalculateID(&idCnr3, cnr3)
- cids = append(cids, idCnr3)
-
- e.SetContainerSource(&containerStorage{cntmap: cnrmap})
-
- for _, sh := range ids {
- for j := range 3 {
- for range 4 {
- obj := testutil.GenerateObjectWithCID(cids[j])
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
- require.NoError(t, err)
- }
- }
- }
-
- var prm EvacuateShardPrm
- prm.ShardID = ids[0:1]
- prm.Scope = EvacuateScopeObjects
- prm.RepOneOnly = true
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, "", st.ErrorMessage())
- require.Equal(t, uint64(4), st.ObjectsEvacuated())
- require.Equal(t, uint64(8), st.ObjectsSkipped())
- require.Equal(t, uint64(0), st.ObjectsFailed())
-}
-
-func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
- t.Skip()
- e, ids, _ := newEngineEvacuate(t, 2, 0)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- cnrmap := make(map[cid.ID]*container.Container)
- var cids []cid.ID
- // Create containers with policy REP 1
- for i := range 10_000 {
- cnr1 := container.Container{}
- p1 := netmap.PlacementPolicy{}
- p1.SetContainerBackupFactor(1)
- x1 := netmap.ReplicaDescriptor{}
- x1.SetNumberOfObjects(2)
- p1.AddReplicas(x1)
- cnr1.SetPlacementPolicy(p1)
- cnr1.SetAttribute("i", strconv.Itoa(i))
-
- var idCnr1 cid.ID
- container.CalculateID(&idCnr1, cnr1)
-
- cnrmap[idCnr1] = &cnr1
- cids = append(cids, idCnr1)
- }
-
- e.SetContainerSource(&containerStorage{
- cntmap: cnrmap,
- latency: time.Millisecond * 100,
- })
-
- for _, cnt := range cids {
- for range 1 {
- obj := testutil.GenerateObjectWithCID(cnt)
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
- require.NoError(t, err)
- }
- }
-
- var prm EvacuateShardPrm
- prm.ShardID = ids[0:1]
- prm.Scope = EvacuateScopeObjects
- prm.RepOneOnly = true
- prm.ContainerWorkerCount = 10
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-
- start := time.Now()
- err := e.Evacuate(context.Background(), prm)
- testWaitForEvacuationCompleted(t, e)
- t.Logf("evacuate took %v\n", time.Since(start))
- require.NoError(t, err)
-}
-
-func (l *evacuationLimiter) waitForCompleted() {
- l.guard.Lock()
- defer l.guard.Unlock()
-
- for l.state.processState != EvacuateProcessStateCompleted {
- l.statusCond.Wait()
- }
-}
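
TestEvacuateSingleProcess and TestEvacuateObjectsAsync above coordinate overlapping Evacuate calls with two channels: the ObjectsHandler signals once through running, then parks on blocker until the test releases it. The gating closure, reduced to its essentials (sync.Once stands in for whatever once-only signalling the tests use):

func gatedHandler(blocker, running chan interface{}) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
	var once sync.Once
	return func(_ context.Context, _ oid.Address, _ *objectSDK.Object) (bool, error) {
		once.Do(func() { close(running) }) // signal: evacuation is in flight
		<-blocker                          // park until the test unblocks it
		return true, nil
	}
}
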
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
index 7dac9eb97..d98101306 100644
--- a/pkg/local_object_storage/engine/exists.go
+++ b/pkg/local_object_storage/engine/exists.go
@@ -18,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
exists := false
locked := false
- if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Exists(ctx, shPrm)
if err != nil {
if client.IsErrObjectAlreadyRemoved(err) {
@@ -37,7 +37,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
}
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
+ e.reportShardError(sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
}
return false
}
@@ -50,9 +50,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
}
return false
- }); err != nil {
- return false, false, err
- }
+ })
if alreadyRemoved {
return false, false, new(apistatus.ObjectAlreadyRemoved)
diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go
deleted file mode 100644
index 9b3c0833f..000000000
--- a/pkg/local_object_storage/engine/exists_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package engine
-
-import (
- "context"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/stretchr/testify/require"
-)
-
-func BenchmarkExists(b *testing.B) {
- b.Run("2 shards", func(b *testing.B) {
- benchmarkExists(b, 2)
- })
- b.Run("4 shards", func(b *testing.B) {
- benchmarkExists(b, 4)
- })
- b.Run("8 shards", func(b *testing.B) {
- benchmarkExists(b, 8)
- })
-}
-
-func benchmarkExists(b *testing.B, shardNum int) {
- e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine
- defer func() { require.NoError(b, e.Close(context.Background())) }()
-
- addr := oidtest.Address()
- for range 100 {
- obj := testutil.GenerateObjectWithCID(cidtest.ID())
- err := Put(context.Background(), e, obj, false)
- if err != nil {
- b.Fatal(err)
- }
- }
-
- b.ReportAllocs()
- b.ResetTimer()
- for range b.N {
- var shPrm shard.ExistsPrm
- shPrm.Address = addr
- shPrm.ECParentAddress = oid.Address{}
- ok, _, err := e.exists(context.Background(), shPrm)
- if err != nil || ok {
- b.Fatalf("%t %v", ok, err)
- }
- }
-}
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 0694c53f3..253256c34 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -55,7 +56,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
- defer elapsed("Get", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
res, err = e.get(ctx, prm)
@@ -66,6 +66,10 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Get", e.metrics.AddMethodDuration)()
+ }
+
errNotFound := new(apistatus.ObjectNotFound)
var shPrm shard.GetPrm
@@ -78,9 +82,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
Engine: e,
}
- if err := it.tryGetWithMeta(ctx); err != nil {
- return GetRes{}, err
- }
+ it.tryGetWithMeta(ctx)
if it.SplitInfo != nil {
return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
@@ -99,18 +101,17 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
return GetRes{}, it.OutError
}
- if err := it.tryGetFromBlobstore(ctx); err != nil {
- return GetRes{}, err
- }
+ it.tryGetFromBlobstore(ctx)
if it.Object == nil {
return GetRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
- e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
+ e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
- zap.Error(it.MetaError),
- zap.Stringer("address", prm.addr))
+ zap.String("error", it.MetaError.Error()),
+ zap.Stringer("address", prm.addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
@@ -137,8 +138,8 @@ type getShardIterator struct {
ecInfoErr *objectSDK.ECInfoError
}
-func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error {
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.ShardPrm.SetIgnoreMeta(noMeta)
@@ -185,19 +186,19 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error {
i.ObjectExpired = true
return true
default:
- i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
return false
}
})
}
-func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error {
+func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
i.ShardPrm.SetIgnoreMeta(true)
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
if sh.GetMode().NoMetabase() {
// Already visited.
return false
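
The tryGetWithMeta/tryGetFromBlobstore pair above implements a two-pass read: the first pass consults each shard through its metabase (unless the shard runs without one), and only when the metabase claims the object exists somewhere does the second pass rescan shards with metadata lookups disabled, skipping shards already visited in no-metabase mode. A minimal sketch of that order, with a hypothetical Shard interface standing in for the engine's real types:

package sketch

import "context"

// Object and Shard are illustrative stand-ins for the engine's types.
type Object struct{}

type Shard interface {
	Get(ctx context.Context, ignoreMeta bool) (*Object, error)
	NoMetabase() bool
}

// getTwoPass mirrors the read order used above: a metabase-aware pass,
// then a blobstor-only pass over shards not yet queried without meta.
func getTwoPass(ctx context.Context, shards []Shard) (*Object, error) {
	var lastErr error
	for _, sh := range shards { // pass 1: trust shard metabases
		obj, err := sh.Get(ctx, sh.NoMetabase())
		if err == nil {
			return obj, nil
		}
		lastErr = err
	}
	for _, sh := range shards { // pass 2: bypass metabases entirely
		if sh.NoMetabase() {
			continue // already visited without a metabase in pass 1
		}
		if obj, err := sh.Get(ctx, true); err == nil {
			return obj, nil
		}
	}
	return nil, lastErr
}
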
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index d436dd411..6857a3631 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -68,7 +68,9 @@ func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err
func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head")
defer span.End()
- defer elapsed("Head", e.metrics.AddMethodDuration)()
+ if e.metrics != nil {
+ defer elapsed("Head", e.metrics.AddMethodDuration)()
+ }
var (
head *objectSDK.Object
@@ -82,7 +84,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
shPrm.SetAddress(prm.addr)
shPrm.SetRaw(prm.raw)
- if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold
res, err := sh.Head(ctx, shPrm)
if err != nil {
@@ -117,15 +119,13 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
outError = new(apistatus.ObjectNotFound)
return true
default:
- e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(sh, "could not head object from shard", err, zap.Stringer("address", prm.addr))
return false
}
}
head = res.Object()
return true
- }); err != nil {
- return HeadRes{}, err
- }
+ })
if head != nil {
return HeadRes{head: head}, nil
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index f9db81f16..5afc50f07 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -39,11 +39,11 @@ func TestHeadRaw(t *testing.T) {
link.SetSplitID(splitID)
t.Run("virtual object split in different shards", func(t *testing.T) {
- te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
- e := te.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ s1 := testNewShard(t)
+ s2 := testNewShard(t)
- s1, s2 := te.shards[0], te.shards[1]
+ e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
+ defer e.Close(context.Background())
var putPrmLeft shard.PutPrm
putPrmLeft.SetObject(child)
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index e5f7072e2..683713f94 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -26,6 +27,9 @@ type InhumePrm struct {
forceRemoval bool
}
+// InhumeRes encapsulates the results of the inhume operation.
+type InhumeRes struct{}
+
// WithTarget sets a list of objects that should be inhumed and tombstone address
// as the reason for inhume operation.
//
@@ -63,226 +67,127 @@ var errInhumeFailure = errors.New("inhume operation failed")
// with that object) if WithForceRemoval option has been provided.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error {
+func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
defer span.End()
- defer elapsed("Inhume", e.metrics.AddMethodDuration)()
- return e.execIfNotBlocked(func() error {
- return e.inhume(ctx, prm)
- })
-}
-
-func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
- addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
- if err != nil {
+ err = e.execIfNotBlocked(func() error {
+ res, err = e.inhume(ctx, prm)
return err
- }
-
- var shPrm shard.InhumePrm
- if prm.forceRemoval {
- shPrm.ForceRemoval()
- }
-
- for shardID, addrs := range addrsPerShard {
- if prm.tombstone != nil {
- shPrm.SetTarget(*prm.tombstone, addrs...)
- } else {
- shPrm.MarkAsGarbage(addrs...)
- }
-
- sh, exists := e.shards[shardID]
- if !exists {
- e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard,
- zap.Error(errors.New("this shard was expected to exist")),
- zap.String("shard_id", shardID),
- )
- return errInhumeFailure
- }
-
- if _, err := sh.Inhume(ctx, shPrm); err != nil {
- e.reportInhumeError(ctx, err, sh)
- return err
- }
- }
-
- return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm)
-}
-
-func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) {
- if err == nil {
- return
- }
-
- var errLocked *apistatus.ObjectLocked
- switch {
- case errors.As(err, &errLocked):
- case errors.Is(err, shard.ErrLockObjectRemoval):
- case errors.Is(err, shard.ErrReadOnlyMode):
- case errors.Is(err, shard.ErrDegradedMode):
- default:
- e.reportShardError(ctx, hs, "couldn't inhume object in shard", err)
- }
-}
-
-// inhumeNotFoundObjects removes object which are not found on any shard.
-//
-// Besides an object not being found on any shard, it is also important to
-// remove it anyway in order to populate the metabase indexes because they are
-// responsible for the correct object status, i.e., the status will be `object
-// not found` without the indexes, the status will be `object is already
-// removed` with the indexes.
-//
-// It is suggested to evenly remove those objects on each shard with the batch
-// size equal to 1 + floor(number of objects / number of shards).
-func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error {
- if len(addrs) == 0 {
- return nil
- }
-
- var shPrm shard.InhumePrm
- if prm.forceRemoval {
- shPrm.ForceRemoval()
- }
-
- numObjectsPerShard := 1 + len(addrs)/len(e.shards)
-
- var inhumeErr error
- itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
- numObjects := min(numObjectsPerShard, len(addrs))
-
- if numObjects == 0 {
- return true
- }
-
- if prm.tombstone != nil {
- shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...)
- } else {
- shPrm.MarkAsGarbage(addrs[:numObjects]...)
- }
- addrs = addrs[numObjects:]
-
- _, inhumeErr = hs.Inhume(ctx, shPrm)
- e.reportInhumeError(ctx, inhumeErr, hs)
- return inhumeErr != nil
})
- if inhumeErr != nil {
- return inhumeErr
- }
- return itErr
-}
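
The batch size used by the deleted inhumeNotFoundObjects, 1 + len(addrs)/len(shards), guarantees that the address list is drained within a single pass over the shards: for example, 10 addresses across 4 shards yield batches of 3, 3, 3 and 1. A standalone sketch of that slicing, with plain strings standing in for addresses:

package sketch

import "fmt"

// splitEvenly drains addrs in batches of 1 + len(addrs)/numShards, the
// batch size used when spreading not-found objects across shards.
func splitEvenly(addrs []string, numShards int) [][]string {
	perShard := 1 + len(addrs)/numShards
	var batches [][]string
	for len(addrs) > 0 {
		n := min(perShard, len(addrs))
		batches = append(batches, addrs[:n])
		addrs = addrs[n:]
	}
	return batches
}

func main() {
	fmt.Println(splitEvenly([]string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}, 4))
	// prints [[a b c] [d e f] [g h i] [j]]
}
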
-
-// groupObjectsByShard groups objects based on the shard(s) they are stored on.
-//
-// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
-// the objects are locked.
-//
-// Returns two sets of objects: found objects which are grouped per shard and
-// not found object. Not found objects are objects which are not found on any
-// shard. This can happen if a node is a container node but doesn't participate
-// in a replica group of the object.
-func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) {
- groups = make(map[string][]oid.Address)
-
- var ids []string
- for _, addr := range addrs {
- ids, err = e.findShards(ctx, addr, checkLocked)
- if err != nil {
- return
- }
-
- if len(ids) == 0 {
- notFoundObjects = append(notFoundObjects, addr)
- continue
- }
-
- for _, id := range ids {
- groups[id] = append(groups[id], addr)
- }
- }
return
}
-// findShards determines the shard(s) where the object is stored.
-//
-// If the object is a root object, multiple shards will be returned.
-//
-// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
-// the objects are locked.
-func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) {
- var (
- ids []string
- retErr error
+func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Inhume", e.metrics.AddMethodDuration)()
+ }
- prm shard.ExistsPrm
+ var shPrm shard.InhumePrm
+ if prm.forceRemoval {
+ shPrm.ForceRemoval()
+ }
- siErr *objectSDK.SplitInfoError
- ecErr *objectSDK.ECInfoError
-
- isRootObject bool
- objectExists bool
- )
-
- if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
- objectExists = false
-
- prm.Address = addr
- switch res, err := sh.Exists(ctx, prm); {
- case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err):
- // NOTE(@a-savchuk): there were some considerations that we can stop
- // immediately if the object is already removed or expired. However,
- // the previous method behavior was:
- // - keep iterating if it's a root object and already removed,
- // - stop iterating if it's not a root object and removed.
- //
- // Since my task was only improving method speed, let's keep the
- // previous method behavior. Continue if it's a root object.
- return !isRootObject
- case errors.As(err, &siErr) || errors.As(err, &ecErr):
- isRootObject = true
- objectExists = true
- case err != nil:
- e.reportShardError(
- ctx, sh, "couldn't check for presence in shard",
- err, zap.Stringer("address", addr),
- )
- case res.Exists():
- objectExists = true
- default:
- }
-
- if checkLocked {
- if isLocked, err := sh.IsLocked(ctx, addr); err != nil {
- e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
+ for i := range prm.addrs {
+ if !prm.forceRemoval {
+ locked, err := e.IsLocked(ctx, prm.addrs[i])
+ if err != nil {
+ e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
zap.Error(err),
- zap.Stringer("address", addr),
- )
- } else if isLocked {
- retErr = new(apistatus.ObjectLocked)
- return true
+ zap.Stringer("addr", prm.addrs[i]),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ } else if locked {
+ return InhumeRes{}, new(apistatus.ObjectLocked)
}
}
- // This exit point must come after checking if the object is locked,
- // since the locked index may be populated even if the object doesn't
- // exist.
- if !objectExists {
- return
+ if prm.tombstone != nil {
+ shPrm.SetTarget(*prm.tombstone, prm.addrs[i])
+ } else {
+ shPrm.MarkAsGarbage(prm.addrs[i])
}
- ids = append(ids, sh.ID().String())
-
- // Continue if it's a root object.
- return !isRootObject
- }); err != nil {
- return nil, err
+ ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true)
+ if err != nil {
+ return InhumeRes{}, err
+ }
+ if !ok {
+ ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false)
+ if err != nil {
+ return InhumeRes{}, err
+ } else if !ok {
+ return InhumeRes{}, errInhumeFailure
+ }
+ }
}
- if retErr != nil {
- return nil, retErr
- }
- return ids, nil
+ return InhumeRes{}, nil
+}
+
+// Returns ok if the object was inhumed during this invocation or earlier.
+func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
+ root := false
+ var existPrm shard.ExistsPrm
+ var retErr error
+ var ok bool
+
+ e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
+ defer func() {
+			// if the object is a root object, we continue: information
+			// about it may be present in other shards
+ if checkExists && root {
+ stop = false
+ }
+ }()
+
+ if checkExists {
+ existPrm.Address = addr
+ exRes, err := sh.Exists(ctx, existPrm)
+ if err != nil {
+ if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) {
+					// inhumed once, no need to inhume it again
+ ok = true
+ return true
+ }
+
+ var siErr *objectSDK.SplitInfoError
+ var ecErr *objectSDK.ECInfoError
+ if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) {
+					e.reportShardError(sh, "could not check for presence in shard", err, zap.Stringer("address", addr))
+ return
+ }
+
+ root = true
+ } else if !exRes.Exists() {
+ return
+ }
+ }
+
+ _, err := sh.Inhume(ctx, prm)
+ if err != nil {
+ var errLocked *apistatus.ObjectLocked
+ switch {
+ case errors.As(err, &errLocked):
+ retErr = new(apistatus.ObjectLocked)
+ return true
+ case errors.Is(err, shard.ErrLockObjectRemoval):
+ retErr = meta.ErrLockObjectRemoval
+ return true
+ case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode):
+ retErr = err
+ return true
+ }
+
+ e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", addr))
+ return false
+ }
+
+ ok = true
+ return true
+ })
+
+ return ok, retErr
}
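
inhume calls inhumeAddr twice per address: first with checkExists set, so the tombstone lands only on shards that actually report the object, and, if no shard matched, once more without the existence check so the first willing shard records it. A condensed sketch of that retry shape, with the shard pass abstracted as a callback:

package sketch

import "errors"

var errInhumeFailure = errors.New("inhume operation failed")

// inhumeWithFallback mirrors the two-phase call made by inhume():
// a strict, existence-checked attempt, then an unchecked fallback.
func inhumeWithFallback(tryShards func(checkExists bool) (bool, error)) error {
	ok, err := tryShards(true) // only shards that report the object
	if err != nil {
		return err
	}
	if ok {
		return nil
	}
	ok, err = tryShards(false) // any shard willing to record the mark
	if err != nil {
		return err
	}
	if !ok {
		return errInhumeFailure
	}
	return nil
}
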
// IsLocked checks whether an object is locked according to StorageEngine's state.
@@ -297,18 +202,17 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
var err error
var outErr error
- if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
- locked, err = h.IsLocked(ctx, addr)
+ e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
+ locked, err = h.Shard.IsLocked(ctx, addr)
if err != nil {
- e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
+ e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("address", addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
outErr = err
return false
}
return locked
- }); err != nil {
- return false, err
- }
+ })
if locked {
return locked, nil
@@ -317,99 +221,94 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
return locked, outErr
}
-// GetLocks return lock id's if object is locked according to StorageEngine's state.
-func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks",
+// GetLocked returns lock IDs if the object is locked according to StorageEngine's state.
+func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocked",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
defer span.End()
- var allLocks []oid.ID
+ var locked []oid.ID
var outErr error
- if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
- locks, err := h.GetLocks(ctx, addr)
+ e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
+ ld, err := h.Shard.GetLocked(ctx, addr)
if err != nil {
- e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr))
+ e.reportShardError(h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
outErr = err
}
- allLocks = append(allLocks, locks...)
+ locked = append(locked, ld...)
return false
- }); err != nil {
- return nil, err
+ })
+ if len(locked) > 0 {
+ return locked, nil
}
- if len(allLocks) > 0 {
- return allLocks, nil
- }
- return allLocks, outErr
+ return locked, outErr
}
func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
sh.HandleExpiredTombstones(ctx, addrs)
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err()))
return true
default:
return false
}
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err))
- }
+ })
}
func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
sh.HandleExpiredLocks(ctx, epoch, lockers)
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
return true
default:
return false
}
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err))
- }
+ })
}
func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) {
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
- sh.HandleDeletedLocks(ctx, lockers)
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ sh.HandleDeletedLocks(lockers)
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
return true
default:
return false
}
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err))
- }
+ })
}
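
Each processExpired*/processDeleted* helper above relies on the same cancellation idiom: after handling a shard, poll ctx.Done() in a select with an empty default branch, so the check never blocks and iteration stops at the next shard boundary. The idiom in isolation:

package sketch

import "context"

// forEachUntilCancelled visits items until ctx is cancelled; the select
// with a default branch makes the cancellation check non-blocking.
func forEachUntilCancelled(ctx context.Context, items []int, handle func(int)) bool {
	for _, it := range items {
		handle(it)
		select {
		case <-ctx.Done():
			return false // interrupted between shards
		default:
		}
	}
	return true
}
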
func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) {
if len(ids) == 0 {
return
}
+
idMap, err := e.selectNonExistentIDs(ctx, ids)
if err != nil {
return
}
+
if len(idMap) == 0 {
return
}
+
var failed bool
var prm shard.ContainerSizePrm
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ e.iterateOverUnsortedShards(func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -418,9 +317,9 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
var drop []cid.ID
for id := range idMap {
prm.SetContainerID(id)
- s, err := sh.ContainerSize(ctx, prm)
+ s, err := sh.ContainerSize(prm)
if err != nil {
- e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
@@ -433,18 +332,16 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
}
return len(idMap) == 0
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
- return
- }
+ })
+
if failed || len(idMap) == 0 {
return
}
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ e.iterateOverUnsortedShards(func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -452,20 +349,19 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
for id := range idMap {
if err := sh.DeleteContainerSize(ctx, id); err != nil {
- e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
}
return false
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
- return
- }
+ })
+
if failed {
return
}
+
for id := range idMap {
e.metrics.DeleteContainerSize(id.EncodeToString())
}
@@ -475,19 +371,22 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
if len(ids) == 0 {
return
}
+
idMap, err := e.selectNonExistentIDs(ctx, ids)
if err != nil {
return
}
+
if len(idMap) == 0 {
return
}
+
var failed bool
var prm shard.ContainerCountPrm
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ e.iterateOverUnsortedShards(func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -498,7 +397,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
prm.ContainerID = id
s, err := sh.ContainerCount(ctx, prm)
if err != nil {
- e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
@@ -511,18 +410,16 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
}
return len(idMap) == 0
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
- return
- }
+ })
+
if failed || len(idMap) == 0 {
return
}
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ e.iterateOverUnsortedShards(func(sh hashedShard) bool {
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
failed = true
return true
default:
@@ -530,20 +427,19 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
for id := range idMap {
if err := sh.DeleteContainerCount(ctx, id); err != nil {
- e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
failed = true
return true
}
}
return false
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
- return
- }
+ })
+
if failed {
return
}
+
for id := range idMap {
e.metrics.DeleteContainerCount(id.EncodeToString())
}
@@ -556,7 +452,7 @@ func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID)
for _, id := range ids {
isAvailable, err := cs.IsContainerAvailable(ctx, id)
if err != nil {
- e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
+ e.log.Warn(logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
return nil, err
}
if isAvailable {
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 0e268cd23..9daa113f8 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -2,24 +2,14 @@ package engine
import (
"context"
- "fmt"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
- "golang.org/x/sync/errgroup"
)
func TestStorageEngine_Inhume(t *testing.T) {
@@ -47,31 +37,30 @@ func TestStorageEngine_Inhume(t *testing.T) {
t.Run("delete small object", func(t *testing.T) {
t.Parallel()
- e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ e := testNewEngine(t).setShardsNum(t, 1).engine
+ defer e.Close(context.Background())
- err := Put(context.Background(), e, parent, false)
+ err := Put(context.Background(), e, parent)
require.NoError(t, err)
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, false, fs)
+ addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
t.Run("delete big object", func(t *testing.T) {
t.Parallel()
+ s1 := testNewShard(t)
+ s2 := testNewShard(t)
- te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
- e := te.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
-
- s1, s2 := te.shards[0], te.shards[1]
+ e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
+ defer e.Close(context.Background())
var putChild shard.PutPrm
putChild.SetObject(child)
@@ -86,257 +75,11 @@ func TestStorageEngine_Inhume(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, false, fs)
+ addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
}
-
-func TestStorageEngine_ECInhume(t *testing.T) {
- parentObjectAddress := oidtest.Address()
- containerID := parentObjectAddress.Container()
-
- chunkObject0 := testutil.GenerateObjectWithCID(containerID)
- chunkObject0.SetECHeader(objectSDK.NewECHeader(
- objectSDK.ECParentInfo{
- ID: parentObjectAddress.Object(),
- }, 0, 4, []byte{}, 0))
-
- chunkObject1 := testutil.GenerateObjectWithCID(containerID)
- chunkObject1.SetECHeader(objectSDK.NewECHeader(
- objectSDK.ECParentInfo{
- ID: parentObjectAddress.Object(),
- }, 1, 4, []byte{}, 0))
-
- tombstone := objectSDK.NewTombstone()
- tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()})
- payload, err := tombstone.Marshal()
- require.NoError(t, err)
- tombstoneObject := testutil.GenerateObjectWithCID(containerID)
- tombstoneObject.SetType(objectSDK.TypeTombstone)
- tombstoneObject.SetPayload(payload)
- tombstoneObjectAddress := object.AddressOf(tombstoneObject)
-
- e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
-
- require.NoError(t, Put(context.Background(), e, chunkObject0, false))
-
- require.NoError(t, Put(context.Background(), e, tombstoneObject, false))
-
- var inhumePrm InhumePrm
- inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress)
- err = e.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
-
- var alreadyRemoved *apistatus.ObjectAlreadyRemoved
-
- require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved)
-
- require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved)
-}
-
-func TestInhumeExpiredRegularObject(t *testing.T) {
- t.Parallel()
-
- const currEpoch = 42
- const objectExpiresAfter = currEpoch - 1
-
- engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
- return []shard.Option{
- shard.WithDisabledGC(),
- shard.WithMetaBaseOptions(append(
- testGetDefaultMetabaseOptions(t),
- meta.WithEpochState(epochState{currEpoch}),
- )...),
- }
- }).prepare(t).engine
-
- cnr := cidtest.ID()
-
- generateAndPutObject := func() *objectSDK.Object {
- obj := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
-
- var putPrm PutPrm
- putPrm.Object = obj
- require.NoError(t, engine.Put(context.Background(), putPrm))
- return obj
- }
-
- t.Run("inhume with tombstone", func(t *testing.T) {
- obj := generateAndPutObject()
- ts := oidtest.Address()
- ts.SetContainer(cnr)
-
- var prm InhumePrm
- prm.WithTarget(ts, object.AddressOf(obj))
- err := engine.Inhume(context.Background(), prm)
- require.NoError(t, err)
- })
-
- t.Run("inhume without tombstone", func(t *testing.T) {
- obj := generateAndPutObject()
-
- var prm InhumePrm
- prm.MarkAsGarbage(object.AddressOf(obj))
- err := engine.Inhume(context.Background(), prm)
- require.NoError(t, err)
- })
-}
-
-func BenchmarkInhumeMultipart(b *testing.B) {
- // The benchmark result insignificantly depends on the number of shards,
- // so do not use it as a benchmark parameter, just set it big enough.
- numShards := 100
-
- for numObjects := 1; numObjects <= 10000; numObjects *= 10 {
- b.Run(
- fmt.Sprintf("objects=%d", numObjects),
- func(b *testing.B) {
- benchmarkInhumeMultipart(b, numShards, numObjects)
- },
- )
- }
-}
-
-func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
- b.StopTimer()
-
- engine := testNewEngine(b).
- setShardsNum(b, numShards).prepare(b).engine
- defer func() { require.NoError(b, engine.Close(context.Background())) }()
-
- cnt := cidtest.ID()
- eg := errgroup.Group{}
-
- for range b.N {
- addrs := make([]oid.Address, numObjects)
-
- for i := range numObjects {
- prm := PutPrm{}
-
- prm.Object = objecttest.Object().Parent()
- prm.Object.SetContainerID(cnt)
- prm.Object.SetType(objectSDK.TypeRegular)
-
- addrs[i] = object.AddressOf(prm.Object)
-
- eg.Go(func() error {
- return engine.Put(context.Background(), prm)
- })
- }
- require.NoError(b, eg.Wait())
-
- ts := oidtest.Address()
- ts.SetContainer(cnt)
-
- prm := InhumePrm{}
- prm.WithTarget(ts, addrs...)
-
- b.StartTimer()
- err := engine.Inhume(context.Background(), prm)
- require.NoError(b, err)
- b.StopTimer()
- }
-}
-
-func TestInhumeIfObjectDoesntExist(t *testing.T) {
- const numShards = 4
-
- engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine
- t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) })
-
- t.Run("inhume without tombstone", func(t *testing.T) {
- testInhumeIfObjectDoesntExist(t, engine, false, false)
- })
- t.Run("inhume with tombstone", func(t *testing.T) {
- testInhumeIfObjectDoesntExist(t, engine, true, false)
- })
- t.Run("force inhume", func(t *testing.T) {
- testInhumeIfObjectDoesntExist(t, engine, false, true)
- })
-
- t.Run("object is locked", func(t *testing.T) {
- t.Run("inhume without tombstone", func(t *testing.T) {
- testInhumeLockedIfObjectDoesntExist(t, engine, false, false)
- })
- t.Run("inhume with tombstone", func(t *testing.T) {
- testInhumeLockedIfObjectDoesntExist(t, engine, true, false)
- })
- t.Run("force inhume", func(t *testing.T) {
- testInhumeLockedIfObjectDoesntExist(t, engine, false, true)
- })
- })
-}
-
-func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
- t.Parallel()
-
- object := oidtest.Address()
- require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce))
-
- err := testHeadObject(e, object)
- if withTombstone {
- require.True(t, client.IsErrObjectAlreadyRemoved(err))
- } else {
- require.True(t, client.IsErrObjectNotFound(err))
- }
-}
-
-func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
- t.Parallel()
-
- object := oidtest.Address()
- require.NoError(t, testLockObject(e, object))
-
- err := testInhumeObject(t, e, object, withTombstone, withForce)
- if !withForce {
- var errLocked *apistatus.ObjectLocked
- require.ErrorAs(t, err, &errLocked)
- return
- }
- require.NoError(t, err)
-
- err = testHeadObject(e, object)
- if withTombstone {
- require.True(t, client.IsErrObjectAlreadyRemoved(err))
- } else {
- require.True(t, client.IsErrObjectNotFound(err))
- }
-}
-
-func testLockObject(e *StorageEngine, obj oid.Address) error {
- return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()})
-}
-
-func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error {
- tombstone := oidtest.Address()
- tombstone.SetContainer(obj.Container())
-
- // Due to the tests design it is possible to set both the options,
- // however removal with tombstone and force removal are exclusive.
- require.False(t, withTombstone && withForce)
-
- var inhumePrm InhumePrm
- if withTombstone {
- inhumePrm.WithTarget(tombstone, obj)
- } else {
- inhumePrm.MarkAsGarbage(obj)
- }
- if withForce {
- inhumePrm.WithForceRemoval()
- }
- return e.Inhume(context.Background(), inhumePrm)
-}
-
-func testHeadObject(e *StorageEngine, obj oid.Address) error {
- var headPrm HeadPrm
- headPrm.WithAddress(obj)
-
- _, err := e.Head(context.Background(), headPrm)
- return err
-}
diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go
index 073248862..cb3830b7c 100644
--- a/pkg/local_object_storage/engine/list.go
+++ b/pkg/local_object_storage/engine/list.go
@@ -7,7 +7,6 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
)
// ErrEndOfListing is returned from an object listing with cursor
@@ -99,10 +98,6 @@ func (l ListWithCursorRes) Cursor() *Cursor {
// Returns ErrEndOfListing if there are no more objects to return or count
// parameter set to zero.
func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.ListWithCursor")
- defer span.End()
- defer elapsed("ListWithCursor", e.metrics.AddMethodDuration)()
-
result := make([]objectcore.Info, 0, prm.count)
// Set initial cursors
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index 6cfa546f8..11a6c7841 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -68,7 +68,10 @@ func TestListWithCursor(t *testing.T) {
meta.WithEpochState(epochState{}),
),
}
- }).prepare(t).engine
+ }).engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
+
defer func() {
require.NoError(t, e.Close(context.Background()))
}()
@@ -79,7 +82,11 @@ func TestListWithCursor(t *testing.T) {
for range tt.objectNum {
containerID := cidtest.ID()
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
- err := e.Put(context.Background(), PutPrm{Object: obj})
+
+ var prm PutPrm
+ prm.WithObject(obj)
+
+ err := e.Put(context.Background(), prm)
require.NoError(t, err)
expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index 3b0cf74f9..ac8fa9c6f 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -32,7 +32,6 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
attribute.Int("locked_count", len(locked)),
))
defer span.End()
- defer elapsed("Lock", e.metrics.AddMethodDuration)()
return e.execIfNotBlocked(func() error {
return e.lock(ctx, idCnr, locker, locked)
@@ -41,19 +40,11 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
for i := range locked {
- st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true)
- if err != nil {
- return err
- }
- switch st {
+ switch e.lockSingle(ctx, idCnr, locker, locked[i], true) {
case 1:
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
- st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false)
- if err != nil {
- return err
- }
- switch st {
+ switch e.lockSingle(ctx, idCnr, locker, locked[i], false) {
case 1:
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
@@ -69,13 +60,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
// - 0: fail
// - 1: locking irregular object
// - 2: ok
-func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) {
+func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
var addrLocked oid.Address
addrLocked.SetContainer(idCnr)
addrLocked.SetObject(locked)
- retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) {
defer func() {
// if object is root we continue since information about it
// can be presented in other shards
@@ -92,14 +83,20 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
var siErr *objectSDK.SplitInfoError
var eiErr *objectSDK.ECInfoError
if errors.As(err, &eiErr) {
- eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr)
- if !ok {
- return false
+ eclocked := []oid.ID{locked}
+ for _, chunk := range eiErr.ECInfo().Chunks {
+ var objID oid.ID
+ err = objID.ReadFromV2(chunk.ID)
+ if err != nil {
+ e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
+ return false
+ }
+ eclocked = append(eclocked, objID)
}
-
err = sh.Lock(ctx, idCnr, locker, eclocked)
if err != nil {
- e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return false
}
@@ -111,7 +108,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
// do not lock it
return true
}
- e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
+ e.reportShardError(sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return
}
@@ -124,7 +121,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
- e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
var errIrregular *apistatus.LockNonRegularObject
@@ -139,18 +136,3 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
})
return
}
-
-func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) {
- eclocked := []oid.ID{locked}
- for _, chunk := range eiErr.ECInfo().Chunks {
- var objID oid.ID
- err := objID.ReadFromV2(chunk.ID)
- if err != nil {
- e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
- zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
- return nil, false
- }
- eclocked = append(eclocked, objID)
- }
- return eclocked, true
-}
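
lock interprets lockSingle's status byte the same way on both passes: 1 aborts with LockNonRegularObject, 0 triggers the unchecked retry (and fails hard if the retry also yields 0), and 2 moves on to the next object. A compact sketch of that dispatch, with lockSingle abstracted as a callback and illustrative error names:

package sketch

import "errors"

var (
	errLockIrregular = errors.New("locking irregular object")
	errLockFailed    = errors.New("lock failed on all shards")
)

// lockOne applies the status protocol documented above:
// 0 = fail, 1 = locking irregular object, 2 = ok.
func lockOne(lockSingle func(checkExists bool) uint8) error {
	switch lockSingle(true) {
	case 1:
		return errLockIrregular
	case 0:
		switch lockSingle(false) {
		case 1:
			return errLockIrregular
		case 0:
			return errLockFailed
		}
	}
	return nil
}
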
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index b8c9d6b1d..7fa7c27ef 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -6,12 +6,12 @@ import (
"testing"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -57,9 +57,11 @@ func TestLockUserScenario(t *testing.T) {
}),
shard.WithTombstoneSource(tss{lockerExpiresAfter}),
}
- }).
- prepare(t)
+ })
e := testEngine.engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
+
defer func() { require.NoError(t, e.Close(context.Background())) }()
lockerID := oidtest.ID()
@@ -95,7 +97,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
- err = Put(context.Background(), e, obj, false)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@@ -103,7 +105,7 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
objectSDK.WriteLock(lockerObj, locker)
- err = Put(context.Background(), e, lockerObj, false)
+ err = Put(context.Background(), e, lockerObj)
require.NoError(t, err)
err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
@@ -114,7 +116,7 @@ func TestLockUserScenario(t *testing.T) {
inhumePrm.WithTarget(tombAddr, objAddr)
var objLockedErr *apistatus.ObjectLocked
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 4.
@@ -122,12 +124,12 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
- err = Put(context.Background(), e, tombObj, false)
+ err = Put(context.Background(), e, tombObj)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorIs(t, err, meta.ErrLockObjectRemoval)
// 5.
@@ -136,7 +138,7 @@ func TestLockUserScenario(t *testing.T) {
inhumePrm.WithTarget(tombAddr, objAddr)
require.Eventually(t, func() bool {
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
return err == nil
}, 30*time.Second, time.Second)
}
@@ -160,9 +162,11 @@ func TestLockExpiration(t *testing.T) {
return pool
}),
}
- }).
- prepare(t)
+ })
e := testEngine.engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
+
defer func() { require.NoError(t, e.Close(context.Background())) }()
const lockerExpiresAfter = 13
@@ -173,7 +177,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj, false)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@@ -185,7 +189,7 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(objectSDK.TypeLock)
lock.SetAttributes(a)
- err = Put(context.Background(), e, lock, false)
+ err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
@@ -195,24 +199,20 @@ func TestLockExpiration(t *testing.T) {
require.NoError(t, err)
var inhumePrm InhumePrm
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(cnr)
- inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
+ inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 3.
e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
// 4.
- tombAddr = oidtest.Address()
- tombAddr.SetContainer(cnr)
- inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
+ inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
require.Eventually(t, func() bool {
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
return err == nil
}, 30*time.Second, time.Second)
}
@@ -239,8 +239,9 @@ func TestLockForceRemoval(t *testing.T) {
}),
shard.WithDeletedLockCallback(e.processDeletedLocks),
}
- }).
- prepare(t).engine
+ }).engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
defer func() { require.NoError(t, e.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -249,14 +250,14 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj, false)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
- err = Put(context.Background(), e, lock, false)
+ err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
@@ -270,12 +271,12 @@ func TestLockForceRemoval(t *testing.T) {
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 4.
@@ -283,64 +284,12 @@ func TestLockForceRemoval(t *testing.T) {
deletePrm.WithAddress(objectcore.AddressOf(lock))
deletePrm.WithForceRemoval()
- require.NoError(t, e.Delete(context.Background(), deletePrm))
+ _, err = e.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
// 5.
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
}
-
-func TestLockExpiredRegularObject(t *testing.T) {
- const currEpoch = 42
- const objectExpiresAfter = currEpoch - 1
-
- engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
- return []shard.Option{
- shard.WithDisabledGC(),
- shard.WithMetaBaseOptions(append(
- testGetDefaultMetabaseOptions(t),
- meta.WithEpochState(epochState{currEpoch}),
- )...),
- }
- }).prepare(t).engine
-
- cnr := cidtest.ID()
-
- object := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
-
- address := objectcore.AddressOf(object)
-
- var putPrm PutPrm
- putPrm.Object = object
- require.NoError(t, engine.Put(context.Background(), putPrm))
-
- var getPrm GetPrm
- var errNotFound *apistatus.ObjectNotFound
-
- getPrm.WithAddress(address)
- _, err := engine.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, &errNotFound)
-
- t.Run("lock expired regular object", func(t *testing.T) {
- engine.Lock(context.Background(),
- address.Container(),
- oidtest.ID(),
- []oid.ID{address.Object()},
- )
-
- res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object))
- require.NoError(t, err)
- require.True(t, res)
- })
-
- t.Run("get expired and locked regular object", func(t *testing.T) {
- getPrm.WithAddress(objectcore.AddressOf(object))
-
- res, err := engine.Get(context.Background(), getPrm)
- require.NoError(t, err)
- require.Equal(t, res.Object(), object)
- })
-}
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 963292d83..1c088c754 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -7,12 +7,34 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
-type (
- MetricRegister = metrics.EngineMetrics
- GCMetrics = metrics.GCMetrics
- WriteCacheMetrics = metrics.WriteCacheMetrics
- NullBool = metrics.NullBool
-)
+type MetricRegister interface {
+ AddMethodDuration(method string, d time.Duration)
+
+ SetObjectCounter(shardID, objectType string, v uint64)
+ AddToObjectCounter(shardID, objectType string, delta int)
+
+ SetMode(shardID string, mode mode.Mode)
+
+ AddToContainerSize(cnrID string, size int64)
+ DeleteContainerSize(cnrID string)
+ DeleteContainerCount(cnrID string)
+ AddToPayloadCounter(shardID string, size int64)
+ IncErrorCounter(shardID string)
+ ClearErrorCounter(shardID string)
+ DeleteShardMetrics(shardID string)
+
+ SetContainerObjectCounter(shardID, contID, objectType string, v uint64)
+ IncContainerObjectCounter(shardID, contID, objectType string)
+ SubContainerObjectCounter(shardID, contID, objectType string, v uint64)
+
+ IncRefillObjectsCount(shardID, path string, size int, success bool)
+ SetRefillPercent(shardID, path string, percent uint32)
+ SetRefillStatus(shardID, path, status string)
+ SetEvacuationInProgress(shardID string, value bool)
+
+ WriteCache() metrics.WriteCacheMetrics
+ GC() metrics.GCMetrics
+}
func elapsed(method string, addFunc func(method string, d time.Duration)) func() {
t := time.Now()
@@ -46,48 +68,3 @@ func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success
func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) {
m.storage.AddInhumedObjectCount(m.shardID, count, objectType)
}
-
-type (
- noopMetrics struct{}
- noopWriteCacheMetrics struct{}
- noopGCMetrics struct{}
-)
-
-var (
- _ MetricRegister = noopMetrics{}
- _ WriteCacheMetrics = noopWriteCacheMetrics{}
- _ GCMetrics = noopGCMetrics{}
-)
-
-func (noopMetrics) AddMethodDuration(string, time.Duration) {}
-func (noopMetrics) SetObjectCounter(string, string, uint64) {}
-func (noopMetrics) AddToObjectCounter(string, string, int) {}
-func (noopMetrics) SetMode(string, mode.Mode) {}
-func (noopMetrics) AddToContainerSize(string, int64) {}
-func (noopMetrics) DeleteContainerSize(string) {}
-func (noopMetrics) DeleteContainerCount(string) {}
-func (noopMetrics) AddToPayloadCounter(string, int64) {}
-func (noopMetrics) IncErrorCounter(string) {}
-func (noopMetrics) ClearErrorCounter(string) {}
-func (noopMetrics) DeleteShardMetrics(string) {}
-func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {}
-func (noopMetrics) IncContainerObjectCounter(string, string, string) {}
-func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {}
-func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {}
-func (noopMetrics) SetRefillPercent(string, string, uint32) {}
-func (noopMetrics) SetRefillStatus(string, string, string) {}
-func (noopMetrics) SetEvacuationInProgress(string, bool) {}
-func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} }
-func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} }
-
-func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {}
-func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {}
-func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {}
-func (noopWriteCacheMetrics) SetMode(string, string) {}
-func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {}
-func (noopWriteCacheMetrics) Close(string, string) {}
-
-func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {}
-func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {}
-func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
-func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {}
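
Aside: the noop implementations removed above and the `if e.metrics != nil` guards introduced throughout the rest of this diff are two ways of making metrics optional. A minimal sketch of the trade-off, using a hypothetical one-method Metrics interface rather than the repository's API:

    package main

    import "time"

    // Metrics is a hypothetical one-method metrics interface.
    type Metrics interface {
        AddMethodDuration(method string, d time.Duration)
    }

    // noopMetrics satisfies Metrics and does nothing, so callers can
    // hold a non-nil value and never check the field.
    type noopMetrics struct{}

    func (noopMetrics) AddMethodDuration(string, time.Duration) {}

    type engine struct{ metrics Metrics }

    func (e *engine) put() {
        start := time.Now()
        // Nil-guard style: each call site branches before recording.
        if e.metrics != nil {
            defer func() { e.metrics.AddMethodDuration("Put", time.Since(start)) }()
        }
        // ... actual work ...
    }

    func main() {
        (&engine{metrics: noopMetrics{}}).put() // noop style: field is never nil
        (&engine{}).put()                       // nil-guard style: field may be nil
    }

The noop-object style keeps call sites branch-free; the nil-guard style avoids stub types but must be repeated at every recording point.
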
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 10cf5ffd5..f92d83745 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -9,6 +9,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -20,8 +22,7 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- Object *objectSDK.Object
- IsIndexedContainer bool
+ obj *objectSDK.Object
}
var errPutShard = errors.New("could not put object to any shard")
@@ -40,6 +41,13 @@ type putToShardRes struct {
err error
}
+// WithObject is a Put option to set the object to save.
+//
+// Option is required.
+func (p *PutPrm) WithObject(obj *objectSDK.Object) {
+ p.obj = obj
+}
+
// Put saves the object to local storage.
//
// Returns any error encountered that
@@ -51,10 +59,9 @@ type putToShardRes struct {
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
trace.WithAttributes(
- attribute.String("address", object.AddressOf(prm.Object).EncodeToString()),
+ attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
- defer elapsed("Put", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
err = e.put(ctx, prm)
@@ -65,25 +72,29 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
}
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
- addr := object.AddressOf(prm.Object)
+ if e.metrics != nil {
+ defer elapsed("Put", e.metrics.AddMethodDuration)()
+ }
+
+ addr := object.AddressOf(prm.obj)
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
- var ecParent oid.Address
- if prm.Object.ECHeader() != nil {
- ecParent.SetObject(prm.Object.ECHeader().Parent())
- ecParent.SetContainer(addr.Container())
+ var parent oid.Address
+ if prm.obj.ECHeader() != nil {
+ parent.SetObject(prm.obj.ECHeader().Parent())
+ parent.SetContainer(addr.Container())
}
var shPrm shard.ExistsPrm
shPrm.Address = addr
- shPrm.ECParentAddress = ecParent
+ shPrm.ParentAddress = parent
existed, locked, err := e.exists(ctx, shPrm)
if err != nil {
return err
}
if !existed && locked {
- lockers, err := e.GetLocks(ctx, ecParent)
+ lockers, err := e.GetLocked(ctx, parent)
if err != nil {
return err
}
@@ -96,19 +107,17 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
}
var shRes putToShardRes
- if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
e.mtx.RLock()
- _, ok := e.shards[sh.ID().String()]
+ pool, ok := e.shardPools[sh.ID().String()]
e.mtx.RUnlock()
if !ok {
// Shard was concurrently removed, skip.
return false
}
- shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer)
+ shRes = e.putToShard(ctx, sh, pool, addr, prm.obj)
return shRes.status != putToShardUnknown
- }); err != nil {
- return err
- }
+ })
switch shRes.status {
case putToShardUnknown:
return errPutShard
@@ -123,64 +132,80 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// putToShard puts object to sh.
// Return putToShardStatus and error if it is necessary to propagate an error upward.
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard,
- addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool,
+func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool,
+ addr oid.Address, obj *objectSDK.Object,
) (res putToShardRes) {
- var existPrm shard.ExistsPrm
- existPrm.Address = addr
+ exitCh := make(chan struct{})
- exists, err := sh.Exists(ctx, existPrm)
- if err != nil {
- if shard.IsErrObjectExpired(err) {
- // object is already found but
- // expired => do nothing with it
+ if err := pool.Submit(func() {
+ defer close(exitCh)
+
+ var existPrm shard.ExistsPrm
+ existPrm.Address = addr
+
+ exists, err := sh.Exists(ctx, existPrm)
+ if err != nil {
+ if shard.IsErrObjectExpired(err) {
+ // object is already found but
+ // expired => do nothing with it
+ res.status = putToShardExists
+ } else {
+ e.log.Warn(logs.EngineCouldNotCheckObjectExistence,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ }
+
+ return // this is not an ErrAlreadyRemoved error, so we can go to the next shard
+ }
+
+ if exists.Exists() {
res.status = putToShardExists
- } else {
- e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
- zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
- }
-
- return // this is not ErrAlreadyRemoved error so we can go to the next shard
- }
-
- if exists.Exists() {
- res.status = putToShardExists
- return
- }
-
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- putPrm.SetIndexAttributes(isIndexedContainer)
-
- _, err = sh.Put(ctx, putPrm)
- if err != nil {
- if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
- errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
- e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
- zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
- return
- }
- if client.IsErrObjectAlreadyRemoved(err) {
- e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
- zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
- res.status = putToShardRemoved
- res.err = err
return
}
- e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr))
- return
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+
+ _, err = sh.Put(ctx, putPrm)
+ if err != nil {
+ if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
+ errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
+ e.log.Warn(logs.EngineCouldNotPutObjectToShard,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return
+ }
+ if client.IsErrObjectAlreadyRemoved(err) {
+ e.log.Warn(logs.EngineCouldNotPutObjectToShard,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ res.status = putToShardRemoved
+ res.err = err
+ return
+ }
+
+ e.reportShardError(sh, "could not put object to shard", err, zap.Stringer("address", addr))
+ return
+ }
+
+ res.status = putToShardSuccess
+ }); err != nil {
+ e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err))
+ close(exitCh)
}
- res.status = putToShardSuccess
+ <-exitCh
return
}
// Put writes provided object to local storage.
-func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error {
- return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer})
+func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
+ var putPrm PutPrm
+ putPrm.WithObject(obj)
+
+ return storage.Put(ctx, putPrm)
}
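
The reworked putToShard above hands the shard operation to a per-shard ants worker pool and blocks on a done channel; when Submit fails (the pool errors instead of queueing, per WithNonblocking(true) below), the caller closes the channel itself so the final receive cannot hang. A self-contained sketch of that submit-and-wait pattern, using only the panjf2000/ants/v2 calls visible in this diff:

    package main

    import (
        "fmt"

        "github.com/panjf2000/ants/v2"
    )

    func main() {
        // Non-blocking pool: Submit returns an error when all workers are busy.
        pool, err := ants.NewPool(4, ants.WithNonblocking(true))
        if err != nil {
            panic(err)
        }
        defer pool.Release()

        done := make(chan struct{})
        var status string

        if err := pool.Submit(func() {
            defer close(done) // signal completion to the waiting caller
            status = "stored"
        }); err != nil {
            // Submission failed: close the channel ourselves so the
            // receive below does not block forever.
            close(done)
            status = "submit failed"
        }

        <-done
        fmt.Println(status)
    }
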
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index 7ec4742d8..cbf26ff4e 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -64,15 +65,6 @@ func (r RngRes) Object() *objectSDK.Object {
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
- trace.WithAttributes(
- attribute.String("address", prm.addr.EncodeToString()),
- attribute.String("offset", strconv.FormatUint(prm.off, 10)),
- attribute.String("length", strconv.FormatUint(prm.ln, 10)),
- ))
- defer span.End()
- defer elapsed("GetRange", e.metrics.AddMethodDuration)()
-
err = e.execIfNotBlocked(func() error {
res, err = e.getRange(ctx, prm)
return err
@@ -82,6 +74,18 @@ func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, e
}
func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.String("offset", strconv.FormatUint(prm.off, 10)),
+ attribute.String("length", strconv.FormatUint(prm.ln, 10)),
+ ))
+ defer span.End()
+
+ if e.metrics != nil {
+ defer elapsed("GetRange", e.metrics.AddMethodDuration)()
+ }
+
var shPrm shard.RngPrm
shPrm.SetAddress(prm.addr)
shPrm.SetRange(prm.off, prm.ln)
@@ -93,9 +97,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
Engine: e,
}
- if err := it.tryGetWithMeta(ctx); err != nil {
- return RngRes{}, err
- }
+ it.tryGetWithMeta(ctx)
if it.SplitInfo != nil {
return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
@@ -111,18 +113,17 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
return RngRes{}, it.OutError
}
- if err := it.tryGetFromBlobstor(ctx); err != nil {
- return RngRes{}, err
- }
+ it.tryGetFromBlobstor(ctx)
if it.Object == nil {
return RngRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
- e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
+ e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
- zap.Error(it.MetaError),
- zap.Stringer("address", prm.addr))
+ zap.String("error", it.MetaError.Error()),
+ zap.Stringer("address", prm.addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
@@ -161,8 +162,8 @@ type getRangeShardIterator struct {
Engine *StorageEngine
}
-func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error {
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.HasDegraded = i.HasDegraded || noMeta
i.ShardPrm.SetIgnoreMeta(noMeta)
@@ -207,19 +208,19 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error {
return true // stop, return it back
default:
- i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
return false
}
})
}
-func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error {
+func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
i.ShardPrm.SetIgnoreMeta(true)
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
if sh.GetMode().NoMetabase() {
// Already processed it without a metabase.
return false
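
Both hunks above rely on the `defer elapsed("GetRange", e.metrics.AddMethodDuration)()` idiom: elapsed runs immediately and captures the start time, while the closure it returns is what defer invokes on the way out. Only the helper's signature and first line are visible at the top of this diff, so the sketch below reconstructs the rest:

    package main

    import (
        "fmt"
        "time"
    )

    // elapsed records how long the surrounding call took: it captures
    // the start time now and returns a closure for defer to run later.
    func elapsed(method string, addFunc func(method string, d time.Duration)) func() {
        t := time.Now()
        return func() {
            addFunc(method, time.Since(t))
        }
    }

    func getRange() {
        // Note the trailing (): elapsed itself runs here, the returned
        // closure runs when getRange returns.
        defer elapsed("GetRange", func(m string, d time.Duration) {
            fmt.Printf("%s took %v\n", m, d)
        })()

        time.Sleep(10 * time.Millisecond) // stand-in for the real lookup
    }

    func main() { getRange() }
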
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
index a29dd7ed9..83c6a54ed 100644
--- a/pkg/local_object_storage/engine/rebuild.go
+++ b/pkg/local_object_storage/engine/rebuild.go
@@ -4,7 +4,6 @@ import (
"context"
"sync"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
@@ -42,7 +41,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
resGuard := &sync.Mutex{}
- concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)}
+ limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit)
eg, egCtx := errgroup.WithContext(ctx)
for _, shardID := range prm.ShardIDs {
@@ -62,7 +61,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{
- ConcurrencyLimiter: concLimiter,
+ ConcurrencyLimiter: limiter,
TargetFillPercent: prm.TargetFillPercent,
})
@@ -89,20 +88,3 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
return res, nil
}
-
-type concurrencyLimiter struct {
- semaphore chan struct{}
-}
-
-func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
- select {
- case l.semaphore <- struct{}{}:
- return l.releaseWorkSlot, nil
- case <-ctx.Done():
- return nil, ctx.Err()
- }
-}
-
-func (l *concurrencyLimiter) releaseWorkSlot() {
- <-l.semaphore
-}
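
The concurrencyLimiter deleted above is a buffered-channel semaphore: sending into the channel acquires a slot (blocking once all slots are taken), receiving releases one, and ctx.Done() provides cancellation while waiting. The same idea in a runnable standalone form:

    package main

    import (
        "context"
        "fmt"
    )

    type semaphore struct{ slots chan struct{} }

    func newSemaphore(n int) *semaphore {
        return &semaphore{slots: make(chan struct{}, n)}
    }

    // Acquire blocks until a slot is free or ctx is cancelled; the
    // returned function gives the slot back.
    func (s *semaphore) Acquire(ctx context.Context) (func(), error) {
        select {
        case s.slots <- struct{}{}:
            return func() { <-s.slots }, nil
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }

    func main() {
        sem := newSemaphore(2)
        release, err := sem.Acquire(context.Background())
        if err != nil {
            panic(err)
        }
        defer release()
        fmt.Println("holding one of two rebuild slots")
    }
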
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index 8ab3c5217..5e1ced56a 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -43,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
prm.Concurrency = defaultRemoveDuplicatesConcurrency
}
- e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies,
+ e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
zap.Int("concurrency", prm.Concurrency))
// The mutex must be taken for the whole duration to avoid the target shard being removed
@@ -55,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
// This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
// However we could change weights in future and easily forget this function.
for _, sh := range e.shards {
- e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
+ e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
ch := make(chan oid.Address)
errG, ctx := errgroup.WithContext(ctx)
@@ -93,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
})
}
if err := errG.Wait(); err != nil {
- e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
+ e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
return err
}
}
- e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies)
+ e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
return nil
}
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 4243a5481..6a8c9fab9 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -14,9 +14,8 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
- indexedContainer bool
+ cnr cid.ID
+ filters objectSDK.SearchFilters
}
// SelectRes groups the resulting values of Select operation.
@@ -25,9 +24,8 @@ type SelectRes struct {
}
// WithContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) {
+func (p *SelectPrm) WithContainerID(cnr cid.ID) {
p.cnr = cnr
- p.indexedContainer = indexedContainer
}
// WithFilters is a Select option to set the object filters.
@@ -51,29 +49,33 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
- defer elapsed("Select", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
- var sErr error
- res, sErr = e._select(ctx, prm)
- return sErr
+ res, err = e._select(ctx, prm)
+ return err
})
return
}
func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Search", e.metrics.AddMethodDuration)()
+ }
+
addrList := make([]oid.Address, 0)
uniqueMap := make(map[string]struct{})
+ var outError error
+
var shPrm shard.SelectPrm
- shPrm.SetContainerID(prm.cnr, prm.indexedContainer)
+ shPrm.SetContainerID(prm.cnr)
shPrm.SetFilters(prm.filters)
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
res, err := sh.Select(ctx, shPrm)
if err != nil {
- e.reportShardError(ctx, sh, "could not select objects from shard", err)
+ e.reportShardError(sh, "could not select objects from shard", err)
return false
}
@@ -85,13 +87,11 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
}
return false
- }); err != nil {
- return SelectRes{}, err
- }
+ })
return SelectRes{
addrList: addrList,
- }, nil
+ }, outError
}
// List returns `limit` available physically storage object addresses in engine.
@@ -99,26 +99,28 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) {
- defer elapsed("List", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
- var lErr error
- res, lErr = e.list(ctx, limit)
- return lErr
+ res, err = e.list(ctx, limit)
+ return err
})
return
}
func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) {
+ if e.metrics != nil {
+ defer elapsed("ListObjects", e.metrics.AddMethodDuration)()
+ }
+
addrList := make([]oid.Address, 0, limit)
uniqueMap := make(map[string]struct{})
ln := uint64(0)
// consider iterating over shuffled shards
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
res, err := sh.List(ctx) // consider limit result of shard iterator
if err != nil {
- e.reportShardError(ctx, sh, "could not select objects from shard", err)
+ e.reportShardError(sh, "could not select objects from shard", err)
} else {
for _, addr := range res.AddressList() { // save only unique values
if _, ok := uniqueMap[addr.EncodeToString()]; !ok {
@@ -134,9 +136,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
return false
- }); err != nil {
- return SelectRes{}, err
- }
+ })
return SelectRes{
addrList: addrList,
@@ -144,9 +144,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
// Select selects objects from local storage using provided filters.
-func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
- selectPrm.WithContainerID(cnr, isIndexedContainer)
+ selectPrm.WithContainerID(cnr)
selectPrm.WithFilters(fs)
res, err := storage.Select(ctx, selectPrm)
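
Both _select and list above merge per-shard results while deduplicating through a map keyed on the encoded address, since the same object may be stored on more than one shard. The merge in isolation, with plain strings standing in for oid.Address:

    package main

    import "fmt"

    func main() {
        shardResults := [][]string{
            {"addr-1", "addr-2"},
            {"addr-2", "addr-3"}, // addr-2 lives on two shards
        }

        merged := make([]string, 0)
        seen := make(map[string]struct{})

        for _, res := range shardResults {
            for _, addr := range res {
                if _, ok := seen[addr]; !ok {
                    merged = append(merged, addr)
                    seen[addr] = struct{}{}
                }
            }
        }

        fmt.Println(merged) // [addr-1 addr-2 addr-3]
    }
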
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 69067c500..2ad6859e4 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -11,12 +11,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/google/uuid"
+ "github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
@@ -110,23 +108,25 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) {
func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) {
sh, err := e.createShard(ctx, opts)
if err != nil {
- return nil, fmt.Errorf("create a shard: %w", err)
+ return nil, fmt.Errorf("could not create a shard: %w", err)
}
err = e.addShard(sh)
if err != nil {
- return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
+ return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
}
- e.metrics.SetMode(sh.ID().String(), sh.GetMode())
+ if e.cfg.metrics != nil {
+ e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
+ }
return sh.ID(), nil
}
-func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) {
+func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*shard.Shard, error) {
id, err := generateShardID()
if err != nil {
- return nil, fmt.Errorf("generate shard ID: %w", err)
+ return nil, fmt.Errorf("could not generate shard ID: %w", err)
}
opts = e.appendMetrics(id, opts)
@@ -136,13 +136,13 @@ func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*
shard.WithExpiredTombstonesCallback(e.processExpiredTombstones),
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
- shard.WithReportErrorFunc(e.reportShardErrorByID),
+ shard.WithReportErrorFunc(e.reportShardErrorBackground),
shard.WithZeroSizeCallback(e.processZeroSizeContainers),
shard.WithZeroCountCallback(e.processZeroCountContainers),
)...)
- if err := sh.UpdateID(ctx); err != nil {
- e.log.Warn(ctx, logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
+ if err := sh.UpdateID(); err != nil {
+ e.log.Warn(logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
}
return sh, nil
@@ -152,26 +152,28 @@ func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard
e.mtx.RLock()
defer e.mtx.RUnlock()
- opts = append(opts,
- shard.WithMetricsWriter(
- &metricsWithID{
- id: id.String(),
- mw: e.metrics,
- },
- ),
- shard.WithWriteCacheMetrics(
- &writeCacheMetrics{
- shardID: id.String(),
- metrics: e.metrics.WriteCache(),
- },
- ),
- shard.WithGCMetrics(
- &gcMetrics{
- storage: e.metrics.GC(),
- shardID: id.String(),
- },
- ),
- )
+ if e.metrics != nil {
+ opts = append(opts,
+ shard.WithMetricsWriter(
+ &metricsWithID{
+ id: id.String(),
+ mw: e.metrics,
+ },
+ ),
+ shard.WithWriteCacheMetrics(
+ &writeCacheMetrics{
+ shardID: id.String(),
+ metrics: e.metrics.WriteCache(),
+ },
+ ),
+ shard.WithGCMetrics(
+ &gcMetrics{
+ storage: e.metrics.GC(),
+ shardID: id.String(),
+ },
+ ),
+ )
+ }
return opts
}
@@ -180,6 +182,11 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
e.mtx.Lock()
defer e.mtx.Unlock()
+ pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
+ if err != nil {
+ return fmt.Errorf("could not create pool: %w", err)
+ }
+
strID := sh.ID().String()
if _, ok := e.shards[strID]; ok {
return fmt.Errorf("shard with id %s was already added", strID)
@@ -193,12 +200,14 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
hash: hrw.StringHash(strID),
}
+ e.shardPools[strID] = pool
+
return nil
}
// removeShards removes specified shards. Skips non-existent shards.
// Logs errors about shards that it could not Close after the removal.
-func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) {
+func (e *StorageEngine) removeShards(ids ...string) {
if len(ids) == 0 {
return
}
@@ -212,27 +221,33 @@ func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) {
continue
}
- e.metrics.DeleteShardMetrics(id)
+ sh.DeleteShardMetrics()
ss = append(ss, sh)
delete(e.shards, id)
- e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
+ pool, ok := e.shardPools[id]
+ if ok {
+ pool.Release()
+ delete(e.shardPools, id)
+ }
+
+ e.log.Info(logs.EngineShardHasBeenRemoved,
zap.String("id", id))
}
e.mtx.Unlock()
for _, sh := range ss {
- err := sh.SetMode(ctx, mode.Disabled)
+ err := sh.SetMode(mode.Disabled)
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
+ e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
}
- err = sh.Close(ctx)
+ err = sh.Close()
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
+ e.log.Error(logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
@@ -261,7 +276,7 @@ func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string })
h := hrw.StringHash(objAddr.EncodeToString())
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
- shards = append(shards, sh)
+ shards = append(shards, hashedShard(sh))
}
hrw.SortHasherSliceByValue(shards, h)
return shards
@@ -274,44 +289,32 @@ func (e *StorageEngine) unsortedShards() []hashedShard {
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
- shards = append(shards, sh)
+ shards = append(shards, hashedShard(sh))
}
return shards
}
-func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error {
+func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) {
for i, sh := range e.sortShards(addr) {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
if handler(i, sh) {
break
}
}
- return nil
}
-func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error {
+func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) {
for _, sh := range e.unsortedShards() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
if handler(sh) {
break
}
}
- return nil
}
// SetShardMode sets mode of the shard with provided identifier.
//
// Returns an error if shard mode was not set, or shard was not found in storage engine.
-func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
+func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -319,9 +322,9 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M
if id.String() == shID {
if resetErrorCounter {
sh.errorCount.Store(0)
- e.metrics.ClearErrorCounter(shID)
+ sh.Shard.ClearErrorCounter()
}
- return sh.SetMode(ctx, m)
+ return sh.SetMode(m)
}
}
@@ -330,6 +333,8 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M
// HandleNewEpoch notifies every shard about NewEpoch event.
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
+ ev := shard.EventNewEpoch(epoch)
+
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -337,54 +342,54 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
select {
case <-ctx.Done():
return
- case sh.NotificationChannel() <- epoch:
+ case sh.NotificationChannel() <- ev:
default:
- e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
+ e.log.Debug(logs.ShardEventProcessingInProgress,
zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))
}
}
}
-func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error {
+func (e *StorageEngine) DetachShards(ids []*shard.ID) error {
if len(ids) == 0 {
return logicerr.New("ids must be non-empty")
}
- deletedShards, err := e.deleteShards(ctx, ids)
+ deletedShards, err := e.deleteShards(ids)
if err != nil {
return err
}
- return e.closeShards(ctx, deletedShards)
+ return e.closeShards(deletedShards)
}
// closeShards closes deleted shards. Tries to close all shards.
// Returns single error with joined shard errors.
-func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedShard) error {
+func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
var multiErr error
var multiErrGuard sync.Mutex
var eg errgroup.Group
for _, sh := range deletedShards {
eg.Go(func() error {
- err := sh.SetMode(ctx, mode.Disabled)
+ err := sh.SetMode(mode.Disabled)
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
+ e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
multiErrGuard.Lock()
- multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err))
+ multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err))
multiErrGuard.Unlock()
}
- err = sh.Close(ctx)
+ err = sh.Close()
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
+ e.log.Error(logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
multiErrGuard.Lock()
- multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err))
+ multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err))
multiErrGuard.Unlock()
}
return nil
@@ -399,7 +404,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS
// deleteShards deletes shards with specified ids from engine shard list
// and releases all engine resources associated with shards.
// Returns deleted shards or error if some shard could not be deleted.
-func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) {
+func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
ss := make([]hashedShard, 0, len(ids))
e.mtx.Lock()
@@ -421,11 +426,17 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha
for _, sh := range ss {
idStr := sh.ID().String()
- e.metrics.DeleteShardMetrics(idStr)
+ sh.DeleteShardMetrics()
delete(e.shards, idStr)
- e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
+ pool, ok := e.shardPools[idStr]
+ if ok {
+ pool.Release()
+ delete(e.shardPools, idStr)
+ }
+
+ e.log.Info(logs.EngineShardHasBeenRemoved,
zap.String("id", idStr))
}
@@ -435,48 +446,3 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha
func (s hashedShard) Hash() uint64 {
return s.hash
}
-
-func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) {
- var err error
- var info []shard.Info
- prm := shard.ExistsPrm{
- Address: obj,
- }
- var siErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
-
- if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
- res, exErr := hs.Exists(ctx, prm)
- if exErr != nil {
- if client.IsErrObjectAlreadyRemoved(exErr) {
- err = new(apistatus.ObjectAlreadyRemoved)
- return true
- }
-
- // Check if error is either SplitInfoError or ECInfoError.
- // True means the object is virtual.
- if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) {
- info = append(info, hs.DumpInfo())
- return false
- }
-
- if shard.IsErrObjectExpired(exErr) {
- err = exErr
- return true
- }
-
- if !client.IsErrObjectNotFound(exErr) {
- e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address))
- }
-
- return false
- }
- if res.Exists() {
- info = append(info, hs.DumpInfo())
- }
- return false
- }); itErr != nil {
- return nil, itErr
- }
- return info, err
-}
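
sortShards above implements rendezvous (HRW) placement: each shard caches a hash of its ID, and SortHasherSliceByValue orders the slice by weight against the hash of the object address, so every node derives the same shard order for a given object. A reduced sketch against the TrueCloudLab/hrw package; it assumes the package's Hasher interface is exactly the `Hash() uint64` method this diff defines on hashedShard:

    package main

    import (
        "fmt"

        "git.frostfs.info/TrueCloudLab/hrw"
    )

    // hashedShard mirrors the engine: the ID hash is computed once,
    // when the shard is added, not on every sort.
    type hashedShard struct {
        id   string
        hash uint64
    }

    func (s hashedShard) Hash() uint64 { return s.hash }

    func main() {
        shards := []hashedShard{
            {id: "shard-a", hash: hrw.StringHash("shard-a")},
            {id: "shard-b", hash: hrw.StringHash("shard-b")},
            {id: "shard-c", hash: hrw.StringHash("shard-c")},
        }

        // The same address yields the same deterministic order everywhere.
        addrHash := hrw.StringHash("container-id/object-id")
        hrw.SortHasherSliceByValue(shards, addrHash)

        for _, sh := range shards {
            fmt.Println(sh.id)
        }
    }
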
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index 3aa9629b0..3347d58f1 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -13,10 +13,11 @@ import (
func TestRemoveShard(t *testing.T) {
const numOfShards = 6
- te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
+ te := testNewEngine(t).setShardsNum(t, numOfShards)
e, ids := te.engine, te.shardIDs
defer func() { require.NoError(t, e.Close(context.Background())) }()
+ require.Equal(t, numOfShards, len(e.shardPools))
require.Equal(t, numOfShards, len(e.shards))
removedNum := numOfShards / 2
@@ -32,10 +33,11 @@ func TestRemoveShard(t *testing.T) {
for id, remove := range mSh {
if remove {
- e.removeShards(context.Background(), id)
+ e.removeShards(id)
}
}
+ require.Equal(t, numOfShards-removedNum, len(e.shardPools))
require.Equal(t, numOfShards-removedNum, len(e.shards))
for id, removed := range mSh {
@@ -49,15 +51,15 @@ func TestDisableShards(t *testing.T) {
const numOfShards = 2
- te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
+ te := testNewEngine(t).setShardsNum(t, numOfShards)
e, ids := te.engine, te.shardIDs
defer func() { require.NoError(t, e.Close(context.Background())) }()
- require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
- require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical))
- require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards(ids), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards(nil), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards([]*shard.ID{}), new(logicerr.Logical))
- require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]}))
+ require.NoError(t, e.DetachShards([]*shard.ID{ids[0]}))
require.Equal(t, 1, len(e.shards))
}
diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go
index cfd15b4d4..39122628f 100644
--- a/pkg/local_object_storage/engine/tree.go
+++ b/pkg/local_object_storage/engine/tree.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.opentelemetry.io/otel/attribute"
@@ -36,9 +37,10 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor,
lm, err := lst[index].TreeMove(ctx, d, treeID, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err,
+ e.reportShardError(lst[index], "can't perform `TreeMove`", err,
zap.Stringer("cid", d.CID),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return nil, err
@@ -69,9 +71,10 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip
lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err,
+ e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err,
zap.Stringer("cid", d.CID),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return nil, err
}
@@ -97,36 +100,10 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str
err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err,
+ e.reportShardError(lst[index], "can't perform `TreeApply`", err,
zap.Stringer("cid", cnr),
- zap.String("tree", treeID))
- }
- return err
- }
- return nil
-}
-
-// TreeApplyBatch implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- index, lst, err := e.getTreeShard(ctx, cnr, treeID)
- if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- return err
- }
-
- err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m)
- if err != nil {
- if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err,
- zap.Stringer("cid", cnr),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return err
}
@@ -155,9 +132,10 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err,
+ e.reportShardError(sh, "can't perform `TreeGetByPath`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -187,9 +165,10 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err,
+ e.reportShardError(sh, "can't perform `TreeGetMeta`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -218,9 +197,10 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err,
+ e.reportShardError(sh, "can't perform `TreeGetChildren`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -230,7 +210,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
}
// TreeSortedByFilename implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
+func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
@@ -241,7 +221,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID,
var err error
var nodes []pilorama.MultiNodeInfo
- var cursor *pilorama.Cursor
+ var cursor *string
for _, sh := range e.sortShards(cid) {
nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
if err != nil {
@@ -249,9 +229,10 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID,
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err,
+ e.reportShardError(sh, "can't perform `TreeSortedByFilename`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -280,9 +261,10 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err,
+ e.reportShardError(sh, "can't perform `TreeGetOpLog`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -309,9 +291,10 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) {
- e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err,
+ e.reportShardError(sh, "can't perform `TreeDrop`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -338,8 +321,9 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string,
return nil, err
}
- e.reportShardError(ctx, sh, "can't perform `TreeList`", err,
- zap.Stringer("cid", cid))
+ e.reportShardError(sh, "can't perform `TreeList`", err,
+ zap.Stringer("cid", cid),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
// returns as much info about
// trees as possible
@@ -403,9 +387,10 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK
err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err,
+ e.reportShardError(lst[index], "can't update tree synchronization height", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return err
}
@@ -429,9 +414,10 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't read tree synchronization height", err,
+ e.reportShardError(sh, "can't read tree synchronization height", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
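
Every reportShardError call in the tree service above now attaches a trace_id field pulled from the request context via tracingPkg.GetTraceID. That helper's body is not part of this diff; a plausible equivalent built on the public OpenTelemetry API would look like this (an assumption, not the repository's actual implementation):

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/trace"
    )

    // getTraceID returns the trace ID recorded in ctx, or "" when the
    // context carries no valid span. Hypothetical stand-in for
    // pkg/tracing's GetTraceID.
    func getTraceID(ctx context.Context) string {
        sc := trace.SpanContextFromContext(ctx)
        if !sc.HasTraceID() {
            return ""
        }
        return sc.TraceID().String()
    }

    func main() {
        // No span has been started, so this prints an empty trace ID.
        fmt.Printf("trace_id=%q\n", getTraceID(context.Background()))
    }
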
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index ea0a9e74e..6f694f082 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -37,7 +37,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
for i := range objCount {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
- err := Put(context.Background(), te.ng, obj, false)
+ err := Put(context.Background(), te.ng, obj)
if err != nil {
b.Fatal(err)
}
@@ -50,7 +50,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
b.Run("search", func(b *testing.B) {
var prm SelectPrm
- prm.WithContainerID(cid, true)
+ prm.WithContainerID(cid)
var fs objectSDK.SearchFilters
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
diff --git a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go
index 6b101fa60..23740868d 100644
--- a/pkg/local_object_storage/internal/log/log.go
+++ b/pkg/local_object_storage/internal/log/log.go
@@ -1,16 +1,14 @@
package storagelog
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
// Write writes message about storage engine's operation to logger.
-func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) {
- logger.Debug(ctx, logs.StorageOperation, fields...)
+func Write(logger *logger.Logger, fields ...zap.Field) {
+ logger.Debug(logs.StorageOperation, fields...)
}
// AddressField returns logger's field for object address.
diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go
index d46365296..586b3dcc6 100644
--- a/pkg/local_object_storage/internal/storagetest/storage.go
+++ b/pkg/local_object_storage/internal/storagetest/storage.go
@@ -11,9 +11,9 @@ import (
// Component represents single storage component.
type Component interface {
Open(context.Context, mode.Mode) error
- SetMode(context.Context, mode.Mode) error
- Init(context.Context) error
- Close(context.Context) error
+ SetMode(mode.Mode) error
+ Init() error
+ Close() error
}
// Constructor constructs storage component.
@@ -59,18 +59,18 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) {
// Use-case: irrecoverable error on some components, close everything.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close())
})
t.Run("RO", func(t *testing.T) {
// Use-case: irrecoverable error on some components, close everything.
// Open in read-only must be done after the db is here.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Init())
+ require.NoError(t, s.Close())
require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close())
})
}
@@ -79,9 +79,9 @@ func TestCloseTwice(t *testing.T, cons Constructor) {
// Use-case: move to maintenance mode twice, first time failed.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.Close(context.Background()))
- require.NoError(t, s.Close(context.Background())) // already closed, no-op
+ require.NoError(t, s.Init())
+ require.NoError(t, s.Close())
+ require.NoError(t, s.Close()) // already closed, no-op
}
// TestSetMode checks that any mode transition can be done safely.
@@ -91,23 +91,23 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) {
// call `SetMode` on all not-yet-initialized components.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.SetMode(context.Background(), m))
+ require.NoError(t, s.SetMode(m))
t.Run("after open in RO", func(t *testing.T) {
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close())
require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
- require.NoError(t, s.SetMode(context.Background(), m))
+ require.NoError(t, s.SetMode(m))
})
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close())
})
t.Run("after init", func(t *testing.T) {
s := cons(t)
// Use-case: normal node operation.
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.SetMode(context.Background(), m))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Init())
+ require.NoError(t, s.SetMode(m))
+ require.NoError(t, s.Close())
})
}
@@ -115,8 +115,8 @@ func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) {
// Use-case: normal node operation.
s := cons(t)
require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.SetMode(context.Background(), from))
- require.NoError(t, s.SetMode(context.Background(), to))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Init())
+ require.NoError(t, s.SetMode(from))
+ require.NoError(t, s.SetMode(to))
+ require.NoError(t, s.Close())
}
diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go
index 52b199b0b..383c596af 100644
--- a/pkg/local_object_storage/internal/testutil/generators.go
+++ b/pkg/local_object_storage/internal/testutil/generators.go
@@ -1,9 +1,7 @@
package testutil
import (
- cryptorand "crypto/rand"
"encoding/binary"
- "math/rand"
"sync/atomic"
"testing"
@@ -11,6 +9,7 @@ import (
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/rand"
)
// AddressGenerator is the interface of types that generate object addresses.
@@ -62,7 +61,7 @@ var _ ObjectGenerator = &SeqObjGenerator{}
func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object {
data := make([]byte, sz)
- _, _ = cryptorand.Read(data)
+ _, _ = rand.Read(data)
obj := GenerateObjectWithCIDWithPayload(cid, data)
obj.SetID(oid)
return obj
@@ -83,7 +82,7 @@ var _ ObjectGenerator = &RandObjGenerator{}
func (g *RandObjGenerator) Next() *objectSDK.Object {
var id oid.ID
- _, _ = cryptorand.Read(id[:])
+ _, _ = rand.Read(id[:])
return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
}
diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go
index 1087e40be..60e9211d5 100644
--- a/pkg/local_object_storage/internal/testutil/object.go
+++ b/pkg/local_object_storage/internal/testutil/object.go
@@ -1,7 +1,6 @@
package testutil
import (
- "crypto/rand"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@@ -12,6 +11,7 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "golang.org/x/exp/rand"
)
const defaultDataSize = 32
diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go
deleted file mode 100644
index de1479e6f..000000000
--- a/pkg/local_object_storage/metabase/bucket_cache.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package meta
-
-import (
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "go.etcd.io/bbolt"
-)
-
-type bucketCache struct {
- locked *bbolt.Bucket
- graveyard *bbolt.Bucket
- garbage *bbolt.Bucket
- expired map[cid.ID]*bbolt.Bucket
- primary map[cid.ID]*bbolt.Bucket
-}
-
-func newBucketCache() *bucketCache {
- return &bucketCache{}
-}
-
-func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(bucketNameLocked)
- }
- return getBucket(&bc.locked, tx, bucketNameLocked)
-}
-
-func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(graveyardBucketName)
- }
- return getBucket(&bc.graveyard, tx, graveyardBucketName)
-}
-
-func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(garbageBucketName)
- }
- return getBucket(&bc.garbage, tx, garbageBucketName)
-}
-
-func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket {
- if *cache != nil {
- return *cache
- }
-
- *cache = tx.Bucket(name)
- return *cache
-}
-
-func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
- if bc == nil {
- bucketName := make([]byte, bucketKeySize)
- bucketName = objectToExpirationEpochBucketName(cnr, bucketName)
- return tx.Bucket(bucketName)
- }
- return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr)
-}
-
-func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
- if bc == nil {
- bucketName := make([]byte, bucketKeySize)
- bucketName = primaryBucketName(cnr, bucketName)
- return tx.Bucket(bucketName)
- }
- return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr)
-}
-
-func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
- value, ok := (*m)[cnr]
- if ok {
- return value
- }
-
- if *m == nil {
- *m = make(map[cid.ID]*bbolt.Bucket, 1)
- }
-
- bucketName := make([]byte, bucketKeySize)
- bucketName = nameFunc(cnr, bucketName)
- (*m)[cnr] = getBucket(&value, tx, bucketName)
- return value
-}
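
The deleted bucket_cache.go memoizes bbolt bucket lookups for the lifetime of a transaction; getBucket takes a **bbolt.Bucket so one helper can both read the caller's cache slot and fill it on first use. The double-pointer memoization idiom on its own, without the bbolt dependency:

    package main

    import "fmt"

    var lookups int

    // expensiveLookup stands in for tx.Bucket(name).
    func expensiveLookup(name string) *string {
        lookups++
        v := "bucket:" + name
        return &v
    }

    // getCached mirrors getBucket: reuse the caller's cache slot if it
    // is filled, otherwise look the value up once and store it through
    // the double pointer.
    func getCached(cache **string, name string) *string {
        if *cache != nil {
            return *cache
        }
        *cache = expensiveLookup(name)
        return *cache
    }

    func main() {
        var locked *string // cache field; nil means "not fetched yet"
        for i := 0; i < 3; i++ {
            _ = getCached(&locked, "locked")
        }
        fmt.Println("lookups:", lookups) // prints: lookups: 1
    }
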
diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go
index da27e6085..472b2affc 100644
--- a/pkg/local_object_storage/metabase/containers.go
+++ b/pkg/local_object_storage/metabase/containers.go
@@ -56,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) {
return result, err
}
-func (db *DB) ContainerSize(id cid.ID) (uint64, error) {
+func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -64,22 +64,21 @@ func (db *DB) ContainerSize(id cid.ID) (uint64, error) {
return 0, ErrDegradedMode
}
- var size uint64
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- size = db.containerSize(tx, id)
+ err = db.boltDB.View(func(tx *bbolt.Tx) error {
+ size, err = db.containerSize(tx, id)
- return nil
+ return err
})
return size, metaerr.Wrap(err)
}
-func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 {
+func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) {
containerVolume := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
id.Encode(key)
- return parseContainerSize(containerVolume.Get(key))
+ return parseContainerSize(containerVolume.Get(key)), nil
}
func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool {
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 8d8d91dc7..8b1874458 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -18,7 +18,7 @@ func TestDB_Containers(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const N = 10
@@ -67,7 +67,7 @@ func TestDB_Containers(t *testing.T) {
assertContains(cnrs, cnr)
- require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID()))
+ require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address()))
cnrs, err = db.Containers(context.Background())
require.NoError(t, err)
@@ -79,7 +79,7 @@ func TestDB_ContainersCount(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const R, T, SG, L = 10, 11, 12, 13 // amount of object per type
@@ -116,7 +116,7 @@ func TestDB_ContainerSize(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const (
C = 3
@@ -164,7 +164,7 @@ func TestDB_ContainerSize(t *testing.T) {
require.NoError(t, metaInhume(
db,
object.AddressOf(obj),
- oidtest.ID(),
+ oidtest.Address(),
))
volume -= int(obj.PayloadSize())
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index c19c65224..d6546d922 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -39,7 +39,7 @@ var (
)
// Open boltDB instance for metabase.
-func (db *DB) Open(ctx context.Context, m mode.Mode) error {
+func (db *DB) Open(_ context.Context, m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = m
@@ -48,16 +48,16 @@ func (db *DB) Open(ctx context.Context, m mode.Mode) error {
if m.NoMetabase() {
return nil
}
- return db.openDB(ctx, m)
+ return db.openDB(m)
}
-func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
+func (db *DB) openDB(mode mode.Mode) error {
err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
if err != nil {
- return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err)
+ return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
}
- db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
+ db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
if db.boltOptions == nil {
opts := *bbolt.DefaultOptions
@@ -65,22 +65,22 @@ func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
}
db.boltOptions.ReadOnly = mode.ReadOnly()
- return metaerr.Wrap(db.openBolt(ctx))
+ return metaerr.Wrap(db.openBolt())
}
-func (db *DB) openBolt(ctx context.Context) error {
+func (db *DB) openBolt() error {
var err error
db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions)
if err != nil {
- return fmt.Errorf("open boltDB database: %w", err)
+ return fmt.Errorf("can't open boltDB database: %w", err)
}
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize
- db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase)
+ db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)
- db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion)
+ db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
return db.boltDB.View(func(tx *bbolt.Tx) error {
// The safest way to check if the metabase is fresh is to check if it has no buckets.
// However, shard info can be present. So here we check that the number of buckets is
@@ -109,7 +109,7 @@ func (db *DB) openBolt(ctx context.Context) error {
//
// Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state,
// use Reset.
-func (db *DB) Init(_ context.Context) error {
+func (db *DB) Init() error {
return metaerr.Wrap(db.init(false))
}
@@ -145,27 +145,27 @@ func (db *DB) init(reset bool) error {
if reset {
err := tx.DeleteBucket(name)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("delete static bucket %s: %w", k, err)
+ return fmt.Errorf("could not delete static bucket %s: %w", k, err)
}
}
_, err := tx.CreateBucketIfNotExists(name)
if err != nil {
- return fmt.Errorf("create static bucket %s: %w", k, err)
+ return fmt.Errorf("could not create static bucket %s: %w", k, err)
}
}
for _, b := range deprecatedBuckets {
err := tx.DeleteBucket(b)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
+ return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err)
}
}
if !reset { // counters will be recalculated by refill metabase
err = syncCounter(tx, false)
if err != nil {
- return fmt.Errorf("sync object counter: %w", err)
+ return fmt.Errorf("could not sync object counter: %w", err)
}
return nil
@@ -205,7 +205,7 @@ func (db *DB) SyncCounters() error {
// Close closes the boltDB instance
// and reports the metabase metric.
-func (db *DB) Close(context.Context) error {
+func (db *DB) Close() error {
var err error
if db.boltDB != nil {
err = db.close()
@@ -226,7 +226,7 @@ func (db *DB) close() error {
// If there was a problem with applying new configuration, an error is returned.
//
// If the metabase couldn't be reopened because of an error, ErrDegradedMode is returned.
-func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) {
+func (db *DB) Reload(opts ...Option) (bool, error) {
var c cfg
for i := range opts {
opts[i](&c)
@@ -236,14 +236,14 @@ func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) {
defer db.modeMtx.Unlock()
if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) {
- if err := db.Close(ctx); err != nil {
+ if err := db.Close(); err != nil {
return false, err
}
db.mode = mode.Disabled
db.metrics.SetMode(mode.ComponentDisabled)
db.info.Path = c.info.Path
- if err := db.openBolt(ctx); err != nil {
+ if err := db.openBolt(); err != nil {
return false, metaerr.Wrap(fmt.Errorf("%w: %v", ErrDegradedMode, err))
}
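With these signatures, the lifecycle in this file reduces to: Open (context accepted but ignored), Init, work, Close. A hedged usage sketch — the option values and the epoch-state variable es are placeholders, not the project's defaults:

db := meta.New(
	meta.WithPath(filepath.Join(dir, "metabase")), // placeholder path
	meta.WithPermissions(0o600),
	meta.WithEpochState(es),
)
if err := db.Open(context.Background(), mode.ReadWrite); err != nil {
	return err
}
if err := db.Init(); err != nil {
	return err
}
defer func() { _ = db.Close() }()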
diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go
index d26402675..0354a5eb6 100644
--- a/pkg/local_object_storage/metabase/control_test.go
+++ b/pkg/local_object_storage/metabase/control_test.go
@@ -15,7 +15,7 @@ import (
func TestReset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
err := db.Reset()
require.NoError(t, err)
@@ -41,7 +41,7 @@ func TestReset(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
- err = metaInhume(db, addrToInhume, oidtest.ID())
+ err = metaInhume(db, addrToInhume, oidtest.Address())
require.NoError(t, err)
assertExists(addr, true, nil)
diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go
index 732f99519..275099ff2 100644
--- a/pkg/local_object_storage/metabase/counter.go
+++ b/pkg/local_object_storage/metabase/counter.go
@@ -238,26 +238,26 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
}
if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
- return fmt.Errorf("increase phy object counter: %w", err)
+ return fmt.Errorf("could not increase phy object counter: %w", err)
}
if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
- return fmt.Errorf("increase logical object counter: %w", err)
+ return fmt.Errorf("could not increase logical object counter: %w", err)
}
if isUserObject {
if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
- return fmt.Errorf("increase user object counter: %w", err)
+ return fmt.Errorf("could not increase user object counter: %w", err)
}
}
return db.incContainerObjectCounter(tx, cnrID, isUserObject)
}
-func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error {
+func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool) error {
b := tx.Bucket(shardInfoBucket)
if b == nil {
return nil
}
- return db.updateShardObjectCounterBucket(b, typ, delta, false)
+ return db.updateShardObjectCounterBucket(b, typ, delta, inc)
}
func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error {
@@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject
func syncCounter(tx *bbolt.Tx, force bool) error {
shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket)
if err != nil {
- return fmt.Errorf("get shard info bucket: %w", err)
+ return fmt.Errorf("could not get shard info bucket: %w", err)
}
shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 &&
len(shardInfoB.Get(objectLogicCounterKey)) == 8 &&
@@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName)
if err != nil {
- return fmt.Errorf("get container counter bucket: %w", err)
+ return fmt.Errorf("could not get container counter bucket: %w", err)
}
var addr oid.Address
@@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
return nil
})
if err != nil {
- return fmt.Errorf("iterate objects: %w", err)
+ return fmt.Errorf("could not iterate objects: %w", err)
}
return setObjectCounters(counters, shardInfoB, containerCounterB)
@@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
value := containerCounterValue(count)
err := containerCounterB.Put(key, value)
if err != nil {
- return fmt.Errorf("update phy container object counter: %w", err)
+ return fmt.Errorf("could not update phy container object counter: %w", err)
}
}
phyData := make([]byte, 8)
@@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err := shardInfoB.Put(objectPhyCounterKey, phyData)
if err != nil {
- return fmt.Errorf("update phy object counter: %w", err)
+ return fmt.Errorf("could not update phy object counter: %w", err)
}
logData := make([]byte, 8)
@@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err = shardInfoB.Put(objectLogicCounterKey, logData)
if err != nil {
- return fmt.Errorf("update logic object counter: %w", err)
+ return fmt.Errorf("could not update logic object counter: %w", err)
}
userData := make([]byte, 8)
@@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err = shardInfoB.Put(objectUserCounterKey, userData)
if err != nil {
- return fmt.Errorf("update user object counter: %w", err)
+ return fmt.Errorf("could not update user object counter: %w", err)
}
return nil
@@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) {
}
var cnrID cid.ID
if err := cnrID.Decode(buf); err != nil {
- return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
+ return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err)
}
return cnrID, nil
}
@@ -654,7 +654,7 @@ func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Update(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
@@ -737,7 +737,7 @@ func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Update(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerCounterBucketName)
key := make([]byte, cidSize)
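Context for the Batch→Update swap above: in bbolt, Update gives the function its own read-write transaction, while Batch may coalesce several concurrent calls into one transaction and may re-invoke the function on retry, so Batch callbacks must be idempotent. A sketch of the distinction (bucket name and key are placeholders):

// Update: one exclusive read-write transaction per call.
err := db.Update(func(tx *bbolt.Tx) error {
	if b := tx.Bucket(name); b != nil {
		return b.Delete(key)
	}
	return nil
})

// Batch: concurrent calls may share a transaction, and the function may
// run more than once, so it must be safe to repeat.
err = db.Batch(func(tx *bbolt.Tx) error {
	if b := tx.Bucket(name); b != nil {
		return b.Delete(key)
	}
	return nil
})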
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index 950385a29..d1f808a63 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -22,7 +22,7 @@ func TestCounters(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
c, err := db.ObjectCounters()
require.NoError(t, err)
require.Zero(t, c.Phy)
@@ -37,7 +37,7 @@ func TestCounters(t *testing.T) {
t.Run("put", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
oo := make([]*objectSDK.Object, 0, objCount)
for range objCount {
oo = append(oo, testutil.GenerateObject())
@@ -75,7 +75,7 @@ func TestCounters(t *testing.T) {
t.Run("delete", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
oo := putObjs(t, db, objCount, false)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -120,7 +120,7 @@ func TestCounters(t *testing.T) {
t.Run("inhume", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
oo := putObjs(t, db, objCount, false)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -156,18 +156,13 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- for _, o := range inhumedObjs {
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(o.Container())
+ prm.SetTombstoneAddress(oidtest.Address())
+ prm.SetAddresses(inhumedObjs...)
- prm.SetTombstoneAddress(tombAddr)
- prm.SetAddresses(o)
-
- res, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, uint64(1), res.LogicInhumed())
- require.Equal(t, uint64(1), res.UserInhumed())
- }
+ res, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(inhumedObjs)), res.LogicInhumed())
+ require.Equal(t, uint64(len(inhumedObjs)), res.UserInhumed())
c, err := db.ObjectCounters()
require.NoError(t, err)
@@ -185,7 +180,7 @@ func TestCounters(t *testing.T) {
t.Run("put_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
parObj := testutil.GenerateObject()
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -223,7 +218,7 @@ func TestCounters(t *testing.T) {
t.Run("delete_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
oo := putObjs(t, db, objCount, true)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -265,7 +260,7 @@ func TestCounters(t *testing.T) {
t.Run("inhume_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
oo := putObjs(t, db, objCount, true)
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -301,16 +296,11 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- for _, o := range inhumedObjs {
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(o.Container())
+ prm.SetTombstoneAddress(oidtest.Address())
+ prm.SetAddresses(inhumedObjs...)
- prm.SetTombstoneAddress(tombAddr)
- prm.SetAddresses(o)
-
- _, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
- }
+ _, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
c, err := db.ObjectCounters()
require.NoError(t, err)
@@ -329,7 +319,7 @@ func TestCounters(t *testing.T) {
func TestDoublePut(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
obj := testutil.GenerateObject()
exp := make(map[cid.ID]meta.ObjectCounters)
@@ -387,7 +377,7 @@ func TestCounters_Expired(t *testing.T) {
es := &epochState{epoch}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
oo := make([]oid.Address, objCount)
for i := range oo {
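The rewritten inhume tests lean on two facts: Inhume accepts all targets in one call and reports aggregate counters, and ObjectCounters exposes the shard-wide totals afterwards. A condensed sketch of the assertion pattern (only the Phy field is shown, matching the accessors already used above):

var prm meta.InhumePrm
prm.SetTombstoneAddress(oidtest.Address())
prm.SetAddresses(inhumedObjs...)

res, err := db.Inhume(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, uint64(len(inhumedObjs)), res.LogicInhumed())

c, err := db.ObjectCounters()
require.NoError(t, err)
require.Equal(t, uint64(objCount), c.Phy) // phy drops only when objects are actually deleted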
diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go
index 4474aa229..1f444a3ef 100644
--- a/pkg/local_object_storage/metabase/db.go
+++ b/pkg/local_object_storage/metabase/db.go
@@ -11,9 +11,9 @@ import (
"sync"
"time"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/mr-tron/base58"
"go.etcd.io/bbolt"
@@ -70,7 +70,7 @@ func defaultCfg() *cfg {
},
boltBatchDelay: bbolt.DefaultMaxBatchDelay,
boltBatchSize: bbolt.DefaultMaxBatchSize,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
metrics: &noopMetrics{},
}
}
diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go
index edaeb13c5..01e1ed2bc 100644
--- a/pkg/local_object_storage/metabase/db_test.go
+++ b/pkg/local_object_storage/metabase/db_test.go
@@ -6,10 +6,10 @@ import (
"strconv"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -32,17 +32,7 @@ func putBig(db *meta.DB, obj *objectSDK.Object) error {
}
func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs, false)
- require.NoError(t, err)
- require.Len(t, res, len(exp))
-
- for i := range exp {
- require.Contains(t, res, exp[i])
- }
-}
-
-func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs, useAttrIndex)
+ res, err := metaSelect(db, cnr, fs)
require.NoError(t, err)
require.Len(t, res, len(exp))
@@ -61,7 +51,7 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB {
)
require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bdb.Init(context.Background()))
+ require.NoError(t, bdb.Init())
return bdb
}
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 9a5a6e574..00c8d06e0 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -77,6 +77,8 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
type referenceNumber struct {
all, cur int
+ addr oid.Address
+
obj *objectSDK.Object
}
@@ -110,14 +112,14 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
var err error
var res DeleteRes
- err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ err = db.boltDB.Update(func(tx *bbolt.Tx) error {
res, err = db.deleteGroup(tx, prm.addrs)
return err
})
if err == nil {
deleted = true
for i := range prm.addrs {
- storagelog.Write(ctx, db.log,
+ storagelog.Write(db.log,
storagelog.AddressField(prm.addrs[i]),
storagelog.OpField("metabase DELETE"))
}
@@ -161,28 +163,28 @@ func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error)
func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
if res.phyCount > 0 {
- err := db.decShardObjectCounter(tx, phy, res.phyCount)
+ err := db.updateShardObjectCounter(tx, phy, res.phyCount, false)
if err != nil {
- return fmt.Errorf("decrease phy object counter: %w", err)
+ return fmt.Errorf("could not decrease phy object counter: %w", err)
}
}
if res.logicCount > 0 {
- err := db.decShardObjectCounter(tx, logical, res.logicCount)
+ err := db.updateShardObjectCounter(tx, logical, res.logicCount, false)
if err != nil {
- return fmt.Errorf("decrease logical object counter: %w", err)
+ return fmt.Errorf("could not decrease logical object counter: %w", err)
}
}
if res.userCount > 0 {
- err := db.decShardObjectCounter(tx, user, res.userCount)
+ err := db.updateShardObjectCounter(tx, user, res.userCount, false)
if err != nil {
- return fmt.Errorf("decrease user object counter: %w", err)
+ return fmt.Errorf("could not decrease user object counter: %w", err)
}
}
if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
- return fmt.Errorf("decrease container object counter: %w", err)
+ return fmt.Errorf("could not decrease container object counter: %w", err)
}
return nil
}
@@ -259,7 +261,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
}
}
return deleteSingleResult{}, nil
@@ -280,7 +282,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
}
}
@@ -293,8 +295,9 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef, ok := refCounter[k]
if !ok {
nRef = &referenceNumber{
- all: parentLength(tx, parAddr),
- obj: parent,
+ all: parentLength(tx, parAddr),
+ addr: parAddr,
+ obj: parent,
}
refCounter[k] = nRef
@@ -308,7 +311,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
// remove object
err = db.deleteObject(tx, obj, false)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err)
}
if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {
@@ -335,12 +338,7 @@ func (db *DB) deleteObject(
err = updateListIndexes(tx, obj, delListIndexItem)
if err != nil {
- return fmt.Errorf("remove list indexes: %w", err)
- }
-
- err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
- if err != nil {
- return fmt.Errorf("remove fake bucket tree indexes: %w", err)
+ return fmt.Errorf("can't remove list indexes: %w", err)
}
if isParent {
@@ -351,7 +349,7 @@ func (db *DB) deleteObject(
addrKey := addressKey(object.AddressOf(obj), key)
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("remove from garbage bucket: %w", err)
+ return fmt.Errorf("could not remove from garbage bucket: %w", err)
}
}
}
@@ -363,12 +361,12 @@ func (db *DB) deleteObject(
func parentLength(tx *bbolt.Tx, addr oid.Address) int {
bucketName := make([]byte, bucketKeySize)
- bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName))
+ bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:]))
if bkt == nil {
return 0
}
- lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName)))
+ lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:])))
if err != nil {
return 0
}
@@ -376,12 +374,11 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int {
return len(lst)
}
-func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) {
bkt := tx.Bucket(item.name)
if bkt != nil {
- return bkt.Delete(item.key)
+ _ = bkt.Delete(item.key) // ignore error, best effort
}
- return nil
}
func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
@@ -406,56 +403,19 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
// if the list is empty, remove the key from the bucket
if len(lst) == 0 {
- return bkt.Delete(item.key)
+ _ = bkt.Delete(item.key) // ignore error, best effort
+
+ return nil
}
// if list is not empty, then update it
encodedLst, err := encodeList(lst)
if err != nil {
- return err
+ return nil // ignore error, best effort
}
- return bkt.Put(item.key, encodedLst)
-}
-
-func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt := tx.Bucket(item.name)
- if bkt == nil {
- return nil
- }
-
- fkbtRoot := bkt.Bucket(item.key)
- if fkbtRoot == nil {
- return nil
- }
-
- if err := fkbtRoot.Delete(item.val); err != nil {
- return err
- }
-
- if hasAnyItem(fkbtRoot) {
- return nil
- }
-
- if err := bkt.DeleteBucket(item.key); err != nil {
- return err
- }
-
- if hasAnyItem(bkt) {
- return nil
- }
-
- return tx.DeleteBucket(item.name)
-}
-
-func hasAnyItem(b *bbolt.Bucket) bool {
- var hasAnyItem bool
- c := b.Cursor()
- for k, _ := c.First(); k != nil; {
- hasAnyItem = true
- break
- }
- return hasAnyItem
+ _ = bkt.Put(item.key, encodedLst) // ignore error, best effort
+ return nil
}
func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error {
@@ -478,47 +438,35 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
return ErrUnknownObjectType
}
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
key: objKey,
- }); err != nil {
- return err
- }
+ })
} else {
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: parentBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
+ })
}
- if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
+ delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
name: smallBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
- if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
+ })
+ delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
name: rootBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
+ })
if expEpoch, ok := hasExpirationEpoch(obj); ok {
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: expEpochToObjectBucketName,
key: expirationEpochKey(expEpoch, cnr, addr.Object()),
- }); err != nil {
- return err
- }
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ })
+ delUniqueIndexItem(tx, namedBucketItem{
name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
key: objKey,
- }); err != nil {
- return err
- }
+ })
}
return nil
@@ -539,18 +487,16 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
}
}
// also drop EC parent root info if current EC chunk is the last one
if !hasAnyChunks {
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
- }); err != nil {
- return err
- }
+ })
}
if ech.ParentSplitParentID() == nil {
@@ -579,15 +525,16 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
- return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+ return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
}
}
// drop split info
- return delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)),
})
+ return nil
}
func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool {
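Cleanup of the unique indexes is now best effort: delUniqueIndexItem swallows both a missing bucket and a failed delete instead of aborting the whole transaction, on the assumption that these indexes can be rebuilt by a metabase refill. The helper's whole shape, as a standalone sketch:

// delUniqueIndexItemSketch drops one key from a named bucket, ignoring a
// missing bucket and a failed delete alike: the index is rebuilt on refill.
func delUniqueIndexItemSketch(tx *bbolt.Tx, name, key []byte) {
	if bkt := tx.Bucket(name); bkt != nil {
		_ = bkt.Delete(key) // ignore error, best effort
	}
}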
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
index 884da23ff..66c79ecd7 100644
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -30,8 +30,8 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
@@ -39,6 +39,7 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj.SetPayloadSize(uint64(10))
@@ -130,9 +131,17 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.Equal(t, 2, len(tombstonedObjects))
- _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
+ var tombstones []oid.Address
+ for _, tss := range tombstonedObjects {
+ tombstones = append(tombstones, tss.tomb)
+ }
+ inhumePrm.SetAddresses(tombstones...)
+ inhumePrm.SetGCMark()
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
+ require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
+
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
@@ -186,8 +195,8 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
ecChunks := make([]oid.ID, chunksCount)
@@ -366,9 +375,17 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool
require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
require.True(t, len(tombstonedObjects) == parentCount+chunksCount)
- _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
+ var tombstones []oid.Address
+ for _, tss := range tombstonedObjects {
+ tombstones = append(tombstones, tss.tomb)
+ }
+ inhumePrm.SetAddresses(tombstones...)
+ inhumePrm.SetGCMark()
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
+ require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects))
+
// GC finds tombstone as garbage and deletes it
garbageAddresses = nil
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
deleted file mode 100644
index 0329e3a73..000000000
--- a/pkg/local_object_storage/metabase/delete_meta_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package meta
-
-import (
- "bytes"
- "context"
- "path/filepath"
- "testing"
-
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/stretchr/testify/require"
- "go.etcd.io/bbolt"
-)
-
-func TestPutDeleteIndexAttributes(t *testing.T) {
- db := New([]Option{
- WithPath(filepath.Join(t.TempDir(), "metabase")),
- WithPermissions(0o600),
- WithEpochState(epochState{}),
- }...)
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- obj1 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name")
- testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object")
-
- var putPrm PutPrm
- putPrm.SetObject(obj1)
-
- _, err := db.Put(context.Background(), putPrm)
- require.NoError(t, err)
-
- require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
- require.Nil(t, b)
- b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
- require.Nil(t, b)
- return nil
- }))
-
- obj2 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name")
- testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object")
-
- putPrm.SetObject(obj2)
- putPrm.SetIndexAttributes(true)
-
- _, err = db.Put(context.Background(), putPrm)
- require.NoError(t, err)
-
- objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize))
- require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
- require.NotNil(t, b)
- b = b.Bucket([]byte("CRDT-Name"))
- require.NotNil(t, b)
- require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
- b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
- require.NotNil(t, b)
- b = b.Bucket([]byte("/path/to/object"))
- require.NotNil(t, b)
- require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
- return nil
- }))
-
- var dPrm DeletePrm
- dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2))
- _, err = db.Delete(context.Background(), dPrm)
- require.NoError(t, err)
-
- require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
- require.Nil(t, b)
- b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
- require.Nil(t, b)
- return nil
- }))
-}
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index c0762a377..cb85157e7 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -18,7 +18,7 @@ import (
func TestDB_Delete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
parent := testutil.GenerateObjectWithCID(cnr)
@@ -40,12 +40,12 @@ func TestDB_Delete(t *testing.T) {
// inhume parent and child so they will be on graveyard
ts := testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object())
+ err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts))
require.NoError(t, err)
ts = testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object())
+ err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts))
require.NoError(t, err)
// delete object
@@ -65,7 +65,7 @@ func TestDB_Delete(t *testing.T) {
func TestDeleteAllChildren(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -103,12 +103,12 @@ func TestDeleteAllChildren(t *testing.T) {
func TestGraveOnlyDelete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
addr := oidtest.Address()
// inhume non-existent object by address
- require.NoError(t, metaInhume(db, addr, oidtest.ID()))
+ require.NoError(t, metaInhume(db, addr, oidtest.Address()))
// delete the object data
require.NoError(t, metaDelete(db, addr))
@@ -116,7 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) {
func TestExpiredObject(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
// removing expired object should be error-free
@@ -128,7 +128,7 @@ func TestExpiredObject(t *testing.T) {
func TestDelete(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
for range 10 {
@@ -170,7 +170,7 @@ func TestDelete(t *testing.T) {
func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
addr := oidtest.Address()
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 7bd6f90a6..2e1b1dce8 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -19,8 +19,8 @@ import (
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- addr oid.Address
- ecParentAddr oid.Address
+ addr oid.Address
+ paddr oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -36,9 +36,9 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) {
p.addr = addr
}
-// SetECParent is an Exists option to set objects parent.
-func (p *ExistsPrm) SetECParent(addr oid.Address) {
- p.ecParentAddr = addr
+// SetParent is an Exists option to set the object's parent.
+func (p *ExistsPrm) SetParent(addr oid.Address) {
+ p.paddr = addr
}
// Exists returns the fact that the object is in the metabase.
@@ -81,7 +81,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch)
+ res.exists, res.locked, err = db.exists(tx, prm.addr, prm.paddr, currEpoch)
return err
})
@@ -89,21 +89,10 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
return res, metaerr.Wrap(err)
}
-func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) {
+func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpoch uint64) (bool, bool, error) {
var locked bool
- if !ecParent.Equals(oid.Address{}) {
- st, err := objectStatus(tx, ecParent, currEpoch)
- if err != nil {
- return false, false, err
- }
- switch st {
- case 2:
- return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
- case 3:
- return false, locked, ErrObjectIsExpired
- }
-
- locked = objectLocked(tx, ecParent.Container(), ecParent.Object())
+ if !parent.Equals(oid.Address{}) {
+ locked = objectLocked(tx, parent.Container(), parent.Object())
}
// check graveyard and object expiration first
st, err := objectStatus(tx, addr, currEpoch)
@@ -153,16 +142,12 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
- return objectStatusWithCache(nil, tx, addr, currEpoch)
-}
-
-func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
// locked object cannot be removed/marked with GC/expired
- if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) {
+ if objectLocked(tx, addr.Container(), addr.Object()) {
return 0, nil
}
- expired, err := isExpiredWithCache(bc, tx, addr, currEpoch)
+ expired, err := isExpired(tx, addr, currEpoch)
if err != nil {
return 0, err
}
@@ -171,8 +156,8 @@ func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, curr
return 3, nil
}
- graveyardBkt := getGraveyardBucket(bc, tx)
- garbageBkt := getGarbageBucket(bc, tx)
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
addrKey := addressKey(addr, make([]byte, addressKeySize))
return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
}
@@ -232,7 +217,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
err := splitInfo.Unmarshal(rawSplitInfo)
if err != nil {
- return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
+ return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
}
return splitInfo, nil
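Callers of the renamed option pass the parent explicitly; a zero oid.Address{} disables the parent check entirely. A hedged usage sketch (the result accessor name is an assumption; the field above is the private res.exists):

var prm meta.ExistsPrm
prm.SetAddress(addr)
prm.SetParent(parentAddr) // only the parent's lock state is consulted now

res, err := db.Exists(ctx, prm)
if err != nil {
	return err
}
_ = res.Exists() // assumed accessor over the private exists field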
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 3045e17f1..0087c1e31 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -1,7 +1,6 @@
package meta_test
import (
- "context"
"errors"
"testing"
@@ -19,7 +18,7 @@ const currEpoch = 1000
func TestDB_Exists(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
t.Run("no object", func(t *testing.T) {
nonExist := testutil.GenerateObject()
@@ -38,7 +37,7 @@ func TestDB_Exists(t *testing.T) {
require.True(t, exists)
t.Run("removed object", func(t *testing.T) {
- err := metaInhume(db, object.AddressOf(regular), oidtest.ID())
+ err := metaInhume(db, object.AddressOf(regular), oidtest.Address())
require.NoError(t, err)
exists, err := metaExists(db, object.AddressOf(regular))
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
index a1351cb6f..68144d8b1 100644
--- a/pkg/local_object_storage/metabase/expired.go
+++ b/pkg/local_object_storage/metabase/expired.go
@@ -74,11 +74,9 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
}
func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- return isExpiredWithCache(nil, tx, addr, currEpoch)
-}
-
-func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- b := getExpiredBucket(bc, tx, addr.Container())
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName)
+ b := tx.Bucket(bucketName)
if b == nil {
return false, nil
}
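FilterExpired builds on the same per-container expiration buckets. A usage sketch — the return shape is assumed from the signature above (the expired subset of the input addresses, plus an error):

expired, err := db.FilterExpired(ctx, currEpoch, addrs)
if err != nil {
	return err
}
for _, a := range expired {
	_ = a // safe to hand to GC once lock state has been checked
}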
diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go
index 495c1eee7..bb98745ee 100644
--- a/pkg/local_object_storage/metabase/expired_test.go
+++ b/pkg/local_object_storage/metabase/expired_test.go
@@ -13,7 +13,7 @@ import (
func TestDB_SelectExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
containerID1 := cidtest.ID()
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index 821810c09..776f5d27c 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -1,6 +1,7 @@
package meta
import (
+ "bytes"
"context"
"fmt"
"time"
@@ -88,12 +89,8 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
- return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
-}
-
-func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
if checkStatus {
- st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
+ st, err := objectStatus(tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -113,13 +110,12 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
bucketName := make([]byte, bucketKeySize)
// check in primary index
- if b := getPrimaryBucket(bc, tx, cnr); b != nil {
- if data := b.Get(key); len(data) != 0 {
- return obj, obj.Unmarshal(data)
- }
+ data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
+ if len(data) != 0 {
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
- data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
+ data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
if len(data) != 0 {
return nil, getECInfoError(tx, cnr, data)
}
@@ -127,13 +123,13 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
// if not found then check in tombstone index
data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
// if not found then check in locker index
data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
// if not found then check if object is a virtual
@@ -191,7 +187,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
err = child.Unmarshal(data)
if err != nil {
- return nil, fmt.Errorf("unmarshal child with parent: %w", err)
+ return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
}
par := child.Parent()
@@ -220,10 +216,10 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error {
ecInfo := objectSDK.NewECInfo()
for _, key := range keys {
// check in primary index
- objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
- if len(objData) != 0 {
+ ojbData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
+ if len(ojbData) != 0 {
obj := objectSDK.New()
- if err := obj.Unmarshal(objData); err != nil {
+ if err := obj.Unmarshal(ojbData); err != nil {
return err
}
chunk := objectSDK.ECChunk{}
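The bytes.Clone calls above are load-bearing: bbolt returns slices that alias the memory-mapped database file and are valid only for the life of the transaction, so any object that outlives the View must be unmarshaled from a copy. Minimal sketch of the safe read:

err := db.View(func(tx *bbolt.Tx) error {
	b := tx.Bucket(name)
	if b == nil {
		return nil
	}
	data := b.Get(key)
	if len(data) == 0 {
		return nil
	}
	// copy first: obj keeps references into the buffer after Unmarshal
	return obj.Unmarshal(bytes.Clone(data))
})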
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 98c428410..7654d2cd8 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -25,7 +25,7 @@ import (
func TestDB_Get(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw := testutil.GenerateObject()
@@ -150,8 +150,9 @@ func TestDB_Get(t *testing.T) {
t.Run("get removed object", func(t *testing.T) {
obj := oidtest.Address()
+ ts := oidtest.Address()
- require.NoError(t, metaInhume(db, obj, oidtest.ID()))
+ require.NoError(t, metaInhume(db, obj, ts))
_, err := metaGet(db, obj, false)
require.True(t, client.IsErrObjectAlreadyRemoved(err))
@@ -219,6 +220,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
meta.WithMaxBatchSize(batchSize),
meta.WithMaxBatchDelay(10*time.Millisecond),
)
+ defer func() { require.NoError(b, db.Close()) }()
addrs := make([]oid.Address, 0, numOfObj)
for range numOfObj {
@@ -233,7 +235,6 @@ func benchmarkGet(b *testing.B, numOfObj int) {
}
db, addrs := prepareDb(runtime.NumCPU())
- defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("parallel", func(b *testing.B) {
b.ReportAllocs()
@@ -253,7 +254,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
})
})
- require.NoError(b, db.Close(context.Background()))
+ require.NoError(b, db.Close())
require.NoError(b, os.RemoveAll(b.Name()))
db, addrs = prepareDb(1)
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 2f23d424c..80d40fb78 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -177,7 +176,7 @@ type gcHandler struct {
func (g gcHandler) handleKV(k, _ []byte) error {
o, err := garbageFromKV(k)
if err != nil {
- return fmt.Errorf("parse garbage object: %w", err)
+ return fmt.Errorf("could not parse garbage object: %w", err)
}
return g.h(o)
@@ -190,7 +189,7 @@ type graveyardHandler struct {
func (g graveyardHandler) handleKV(k, v []byte) error {
o, err := graveFromKV(k, v)
if err != nil {
- return fmt.Errorf("parse grave: %w", err)
+ return fmt.Errorf("could not parse grave: %w", err)
}
return g.h(o)
@@ -240,7 +239,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
func garbageFromKV(k []byte) (res GarbageObject, err error) {
err = decodeAddressFromKey(&res.addr, k)
if err != nil {
- err = fmt.Errorf("parse address: %w", err)
+ err = fmt.Errorf("could not parse address: %w", err)
}
return
@@ -256,58 +255,46 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) {
return
}
-// InhumeTombstones deletes tombstoned objects from the
+// DropGraves deletes tombstoned objects from the
// graveyard bucket.
//
// Returns any error that appeared during the deletion process.
-func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) {
+func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("DropGraves", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones")
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.DropGraves")
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
- return InhumeRes{}, ErrDegradedMode
+ return ErrDegradedMode
} else if db.mode.ReadOnly() {
- return InhumeRes{}, ErrReadOnlyMode
+ return ErrReadOnlyMode
}
buf := make([]byte, addressKeySize)
- prm := InhumePrm{forceRemoval: true}
- currEpoch := db.epochState.CurrentEpoch()
- var res InhumeRes
-
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
- res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)}
-
- garbageBKT := tx.Bucket(garbageBucketName)
- graveyardBKT := tx.Bucket(graveyardBucketName)
-
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
- if err != nil {
- return err
+ return db.boltDB.Update(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(graveyardBucketName)
+ if bkt == nil {
+ return nil
}
- for i := range tss {
- if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil {
- return err
- }
- if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil {
+ for _, ts := range tss {
+ err := bkt.Delete(addressKey(ts.Address(), buf))
+ if err != nil {
return err
}
}
return nil
})
- return res, err
}
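The renamed method pairs with graveyard iteration: collect the tombstoned objects the GC has already processed, then drop just their graveyard records — no counters are touched, which is why DropGraves returns only an error. Usage sketch:

var prm meta.GraveyardIterationPrm
var handled []meta.TombstonedObject
prm.SetHandler(func(ts meta.TombstonedObject) error {
	handled = append(handled, ts)
	return nil
})
if err := db.IterateOverGraveyard(ctx, prm); err != nil {
	return err
}
if err := db.DropGraves(ctx, handled); err != nil {
	return err
}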
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index ebadecc04..75c7e2852 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -7,9 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -17,7 +14,7 @@ import (
func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
var counter int
var iterGravePRM meta.GraveyardIterationPrm
@@ -44,7 +41,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
func TestDB_Iterate_OffsetNotFound(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
obj1 := testutil.GenerateObject()
obj2 := testutil.GenerateObject()
@@ -115,14 +112,13 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
- cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObjectWithCID(cnr)
- obj2 := testutil.GenerateObjectWithCID(cnr)
- obj3 := testutil.GenerateObjectWithCID(cnr)
- obj4 := testutil.GenerateObjectWithCID(cnr)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
+ obj3 := testutil.GenerateObject()
+ obj4 := testutil.GenerateObject()
var err error
@@ -142,7 +138,6 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
- addrTombstone.SetContainer(cnr)
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
@@ -204,14 +199,13 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
- cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObjectWithCID(cnr)
- obj2 := testutil.GenerateObjectWithCID(cnr)
- obj3 := testutil.GenerateObjectWithCID(cnr)
- obj4 := testutil.GenerateObjectWithCID(cnr)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
+ obj3 := testutil.GenerateObject()
+ obj4 := testutil.GenerateObject()
var err error
@@ -229,7 +223,6 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
- addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(
@@ -305,7 +298,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
func TestDB_IterateOverGarbage_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
// generate and put 4 objects
obj1 := testutil.GenerateObject()
@@ -395,14 +388,13 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
require.False(t, iWasCalled)
}
-func TestDB_InhumeTombstones(t *testing.T) {
+func TestDB_DropGraves(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
- cnr := cidtest.ID()
// generate and put 2 objects
- obj1 := testutil.GenerateObjectWithCID(cnr)
- obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
var err error
@@ -412,20 +404,8 @@ func TestDB_InhumeTombstones(t *testing.T) {
err = putBig(db, obj2)
require.NoError(t, err)
- id1, _ := obj1.ID()
- id2, _ := obj2.ID()
- ts := objectSDK.NewTombstone()
- ts.SetMembers([]oid.ID{id1, id2})
- objTs := objectSDK.New()
- objTs.SetContainerID(cnr)
- objTs.SetType(objectSDK.TypeTombstone)
-
- data, _ := ts.Marshal()
- objTs.SetPayload(data)
- require.NoError(t, objectSDK.CalculateAndSetID(objTs))
- require.NoError(t, putBig(db, objTs))
-
- addrTombstone := object.AddressOf(objTs)
+ // inhume with tombstone
+ addrTombstone := oidtest.Address()
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
@@ -448,11 +428,8 @@ func TestDB_InhumeTombstones(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 2, counter)
- res, err := db.InhumeTombstones(context.Background(), buriedTS)
+ err = db.DropGraves(context.Background(), buriedTS)
require.NoError(t, err)
- require.EqualValues(t, 1, res.LogicInhumed())
- require.EqualValues(t, 0, res.UserInhumed())
- require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID())
counter = 0
iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error {
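This simpler fixture is legal again because the next file drops InhumePrm.validate, so Inhume no longer insists that a tombstone share a container with its targets. The pattern the tests now rely on, in isolation:

// cross-container tombstone: accepted once validate() is removed
var inhumePrm meta.InhumePrm
inhumePrm.SetTombstoneAddress(oidtest.Address())
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
_, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)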
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 76018fb61..b62accc43 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -143,20 +143,6 @@ func (p *InhumePrm) SetForceGCMark() {
p.forceRemoval = true
}
-func (p *InhumePrm) validate() error {
- if p == nil {
- return nil
- }
- if p.tomb != nil {
- for _, addr := range p.target {
- if addr.Container() != p.tomb.Container() {
- return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb)
- }
- }
- }
- return nil
-}
-
var errBreakBucketForEach = errors.New("bucket ForEach break")
// ErrLockObjectRemoval is returned when inhume operation is being
@@ -185,10 +171,6 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
- if err := prm.validate(); err != nil {
- return InhumeRes{}, err
- }
-
if db.mode.NoMetabase() {
return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
@@ -199,13 +181,13 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
inhumedByCnrID: make(map[cid.ID]ObjectCounters),
}
currEpoch := db.epochState.CurrentEpoch()
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Update(func(tx *bbolt.Tx) error {
return db.inhumeTx(tx, currEpoch, prm, &res)
})
success = err == nil
if success {
for _, addr := range prm.target {
- storagelog.Write(ctx, db.log,
+ storagelog.Write(db.log,
storagelog.AddressField(addr),
storagelog.OpField("metabase INHUME"))
}
@@ -217,93 +199,85 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes
garbageBKT := tx.Bucket(garbageBucketName)
graveyardBKT := tx.Bucket(graveyardBucketName)
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm)
if err != nil {
return err
}
buf := make([]byte, addressKeySize)
for i := range prm.target {
- if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil {
+ id := prm.target[i].Object()
+ cnr := prm.target[i].Container()
+
+ // prevent locked objects from being inhumed
+ if !prm.forceRemoval && objectLocked(tx, cnr, id) {
+ return new(apistatus.ObjectLocked)
+ }
+
+ var lockWasChecked bool
+
+ // prevent lock objects from being inhumed
+ // if `Inhume` was called without the
+ // `WithForceGCMark` option
+ if !prm.forceRemoval {
+ if isLockObject(tx, cnr, id) {
+ return ErrLockObjectRemoval
+ }
+
+ lockWasChecked = true
+ }
+
+ obj, err := db.get(tx, prm.target[i], buf, false, true, epoch)
+ targetKey := addressKey(prm.target[i], buf)
+ var ecErr *objectSDK.ECInfoError
+ if err == nil {
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
+ if err != nil {
+ return err
+ }
+ } else if errors.As(err, &ecErr) {
+ err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
+ if err != nil {
+ return err
+ }
+ }
+
+ if prm.tomb != nil {
+ var isTomb bool
+ isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
+ if err != nil {
+ return err
+ }
+
+ if isTomb {
+ continue
+ }
+ }
+
+ // consider checking if the target is already in the graveyard
+ err = bkt.Put(targetKey, value)
+ if err != nil {
return err
}
+
+ if prm.lockObjectHandling {
+ // do not perform the lock check if
+ // it was already performed above
+ if lockWasChecked {
+ // inhumed object is not of
+ // the LOCK type
+ continue
+ }
+
+ if isLockObject(tx, cnr, id) {
+ res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
+ }
+ }
}
return db.applyInhumeResToCounters(tx, res)
}
-func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error {
- id := addr.Object()
- cnr := addr.Container()
- tx := bkt.Tx()
-
- // prevent locked objects to be inhumed
- if !prm.forceRemoval && objectLocked(tx, cnr, id) {
- return new(apistatus.ObjectLocked)
- }
-
- var lockWasChecked bool
-
- // prevent lock objects to be inhumed
- // if `Inhume` was called not with the
- // `WithForceGCMark` option
- if !prm.forceRemoval {
- if isLockObject(tx, cnr, id) {
- return ErrLockObjectRemoval
- }
-
- lockWasChecked = true
- }
-
- obj, err := db.get(tx, addr, buf, false, true, epoch)
- targetKey := addressKey(addr, buf)
- var ecErr *objectSDK.ECInfoError
- if err == nil {
- err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
- if err != nil {
- return err
- }
- } else if errors.As(err, &ecErr) {
- err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
- if err != nil {
- return err
- }
- }
-
- if prm.tomb != nil {
- var isTomb bool
- isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
- if err != nil {
- return err
- }
-
- if isTomb {
- return nil
- }
- }
-
- // consider checking if target is already in graveyard?
- err = bkt.Put(targetKey, value)
- if err != nil {
- return err
- }
-
- if prm.lockObjectHandling {
- // do not perform lock check if
- // it was already called
- if lockWasChecked {
- // inhumed object is not of
- // the LOCK type
- return nil
- }
-
- if isLockObject(tx, cnr, id) {
- res.deletedLockObj = append(res.deletedLockObj, addr)
- }
- }
- return nil
-}
-
func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes,
garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket,
ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte,
@@ -342,10 +316,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I
}
func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
- if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil {
+ if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil {
return err
}
- if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil {
+ if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil {
return err
}
@@ -362,7 +336,7 @@ func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
// 1. tombstone address if Inhume was called with
// a Tombstone
// 2. zeroValue if Inhume was called with a GC mark
-func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
+func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
if prm.tomb != nil {
targetBucket = graveyardBKT
tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
@@ -373,7 +347,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
if data != nil {
err := targetBucket.Delete(tombKey)
if err != nil {
- return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
+ return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
}
}
@@ -385,8 +359,11 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
return targetBucket, value, nil
}
-func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) {
- targetIsTomb := isTomb(graveyardBKT, addressKey)
+func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) {
+ targetIsTomb, err := isTomb(graveyardBKT, key)
+ if err != nil {
+ return false, err
+ }
// do not add grave if target is a tombstone
if targetIsTomb {
@@ -395,7 +372,7 @@ func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte
// if tombstone appears object must be
// additionally marked with GC
- return false, garbageBKT.Put(addressKey, zeroValue)
+ return false, garbageBKT.Put(key, zeroValue)
}
func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error {
@@ -415,21 +392,25 @@ func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Buc
return nil
}
-func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool {
+func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) {
targetIsTomb := false
// iterate over graveyard and check if target address
// is the address of tombstone in graveyard.
- // tombstone must have the same container ID as key.
- c := graveyardBucket.Cursor()
- containerPrefix := addressKey[:cidSize]
- for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() {
+ err := graveyardBucket.ForEach(func(_, v []byte) error {
// check if graveyard has record with key corresponding
// to tombstone address (at least one)
- targetIsTomb = bytes.Equal(v, addressKey)
+ targetIsTomb = bytes.Equal(v, key)
+
if targetIsTomb {
- break
+ // break bucket iterator
+ return errBreakBucketForEach
}
+
+ return nil
+ })
+ if err != nil && !errors.Is(err, errBreakBucketForEach) {
+ return false, err
}
- return targetIsTomb
+ return targetIsTomb, nil
}
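
Note on the transaction primitive restored in the hunks above: with the standard bbolt API, `Update` runs the closure exactly once inside its own exclusive read-write transaction, while `Batch` may coalesce concurrent callers into a single transaction and may re-run the closure on retry, so `Batch` closures must be idempotent. A minimal self-contained sketch (file path and bucket name are illustrative only):

package main

import (
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("/tmp/sketch.db", 0o600, nil) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update: runs once, exclusively; side effects in the closure are safe.
	err = db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("garbage"))
		if err != nil {
			return err
		}
		return b.Put([]byte("addr-1"), []byte{})
	})
	if err != nil {
		log.Fatal(err)
	}

	// Batch: concurrent calls may share one transaction and the closure
	// may be retried on conflict, so it must be idempotent.
	if err := db.Batch(func(tx *bbolt.Tx) error {
		return tx.Bucket([]byte("garbage")).Put([]byte("addr-2"), []byte{})
	}); err != nil {
		log.Fatal(err)
	}
}
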
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
index 180713287..c3b1e72da 100644
--- a/pkg/local_object_storage/metabase/inhume_ec_test.go
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -25,8 +25,8 @@ func TestInhumeECObject(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
ecChunk := oidtest.ID()
@@ -35,12 +35,14 @@ func TestInhumeECObject(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
chunkObj.SetPayloadSize(uint64(5))
chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
chunkObj2 := testutil.GenerateObjectWithCID(cnr)
+ chunkObj2.SetContainerID(cnr)
chunkObj2.SetID(ecChunk2)
chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj2.SetPayloadSize(uint64(10))
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 786d10396..163fbec2a 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -9,7 +9,6 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -17,15 +16,17 @@ import (
func TestDB_Inhume(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw := testutil.GenerateObject()
testutil.AddAttribute(raw, "foo", "bar")
+ tombstoneID := oidtest.Address()
+
err := putBig(db, raw)
require.NoError(t, err)
- err = metaInhume(db, object.AddressOf(raw), oidtest.ID())
+ err = metaInhume(db, object.AddressOf(raw), tombstoneID)
require.NoError(t, err)
_, err = metaExists(db, object.AddressOf(raw))
@@ -37,25 +38,18 @@ func TestDB_Inhume(t *testing.T) {
func TestInhumeTombOnTomb(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
var (
err error
- cnr = cidtest.ID()
addr1 = oidtest.Address()
addr2 = oidtest.Address()
addr3 = oidtest.Address()
- addr4 = oidtest.Address()
inhumePrm meta.InhumePrm
existsPrm meta.ExistsPrm
)
- addr1.SetContainer(cnr)
- addr2.SetContainer(cnr)
- addr3.SetContainer(cnr)
- addr4.SetContainer(cnr)
-
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(addr2)
@@ -90,7 +84,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
require.True(t, client.IsErrObjectAlreadyRemoved(err))
inhumePrm.SetAddresses(addr1)
- inhumePrm.SetTombstoneAddress(addr4)
+ inhumePrm.SetTombstoneAddress(oidtest.Address())
// try to inhume addr1 (which is already a tombstone in graveyard)
_, err = db.Inhume(context.Background(), inhumePrm)
@@ -107,7 +101,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
func TestInhumeLocked(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
locked := oidtest.Address()
@@ -123,13 +117,10 @@ func TestInhumeLocked(t *testing.T) {
require.ErrorAs(t, err, &e)
}
-func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error {
+func metaInhume(db *meta.DB, target, tomb oid.Address) error {
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(target)
- var tombAddr oid.Address
- tombAddr.SetContainer(target.Container())
- tombAddr.SetObject(tomb)
- inhumePrm.SetTombstoneAddress(tombAddr)
+ inhumePrm.SetTombstoneAddress(tomb)
_, err := db.Inhume(context.Background(), inhumePrm)
return err
diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go
index 9cccd7dad..d44c51fb2 100644
--- a/pkg/local_object_storage/metabase/iterators.go
+++ b/pkg/local_object_storage/metabase/iterators.go
@@ -3,6 +3,7 @@ package meta
import (
"context"
"errors"
+ "fmt"
"strconv"
"time"
@@ -11,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
@@ -109,6 +111,70 @@ func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler)
return nil
}
+// IterateCoveredByTombstones iterates over all objects in the DB that are
+// covered by a tombstone whose string-encoded address is a key of tss.
+// Locked objects are not included (not to be confused with objects of type
+// LOCK).
+//
+// If h returns ErrInterruptIterator, iteration stops and nil is returned
+// immediately. Any other error returned by h is propagated as is.
+//
+// Does not modify tss.
+func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ return db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateCoveredByTombstones(tx, tss, h)
+ })
+}
+
+func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error {
+ bktGraveyard := tx.Bucket(graveyardBucketName)
+
+ err := bktGraveyard.ForEach(func(k, v []byte) error {
+ // graveyard maps object address (key) -> tombstone address (value)
+ var tombAddr oid.Address
+ if err := decodeAddressFromKey(&tombAddr, v); err != nil {
+ return err
+ }
+ if _, ok := tss[tombAddr.EncodeToString()]; ok {
+ var objAddr oid.Address
+
+ err := decodeAddressFromKey(&objAddr, k)
+ if err != nil {
+ return fmt.Errorf("could not parse address of the object under tombstone: %w", err)
+ }
+
+ if objectLocked(tx, objAddr.Container(), objAddr.Object()) {
+ return nil
+ }
+
+ return h(objAddr)
+ }
+
+ return nil
+ })
+
+ if errors.Is(err, ErrInterruptIterator) {
+ err = nil
+ }
+
+ return err
+}
+
func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error {
var cid cid.ID
var oid oid.ID
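
Caller-side shape of the new iterator, sketched under the assumption that db, ctx and a known tombstone address tombAddr are already in hand (none of these names come from this patch):

// Collect addresses of objects buried under a known tombstone; locked
// objects are filtered out by the iterator itself.
tss := map[string]oid.Address{
	tombAddr.EncodeToString(): tombAddr,
}

var covered []oid.Address
err := db.IterateCoveredByTombstones(ctx, tss, func(addr oid.Address) error {
	covered = append(covered, addr)
	return nil // returning meta.ErrInterruptIterator would stop iteration early
})
if err != nil {
	// handle the error
}
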
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 4c9579965..54d56d923 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -5,10 +5,10 @@ import (
"strconv"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -17,7 +17,7 @@ import (
func TestDB_IterateExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const epoch = 13
@@ -66,3 +66,60 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt
return object2.AddressOf(obj)
}
+
+func TestDB_IterateCoveredByTombstones(t *testing.T) {
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
+
+ ts := oidtest.Address()
+ protected1 := oidtest.Address()
+ protected2 := oidtest.Address()
+ protectedLocked := oidtest.Address()
+ garbage := oidtest.Address()
+
+ var prm meta.InhumePrm
+ var err error
+
+ prm.SetAddresses(protected1, protected2, protectedLocked)
+ prm.SetTombstoneAddress(ts)
+
+ _, err = db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetAddresses(garbage)
+ prm.SetGCMark()
+
+ _, err = db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+
+ var handled []oid.Address
+
+ tss := map[string]oid.Address{
+ ts.EncodeToString(): ts,
+ }
+
+ err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
+ handled = append(handled, addr)
+ return nil
+ })
+ require.NoError(t, err)
+
+ require.Len(t, handled, 3)
+ require.Contains(t, handled, protected1)
+ require.Contains(t, handled, protected2)
+ require.Contains(t, handled, protectedLocked)
+
+ err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
+ require.NoError(t, err)
+
+ handled = handled[:0]
+
+ err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
+ handled = append(handled, addr)
+ return nil
+ })
+ require.NoError(t, err)
+
+ require.Len(t, handled, 2)
+ require.NotContains(t, handled, protectedLocked)
+}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 2a0bd7f6a..b4326a92c 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -1,7 +1,6 @@
package meta
import (
- "bytes"
"context"
"time"
@@ -62,33 +61,8 @@ func (l ListRes) Cursor() *Cursor {
return l.cursor
}
-// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
-type IterateOverContainersPrm struct {
- // Handler function executed upon containers in db.
- Handler func(context.Context, objectSDK.Type, cid.ID) error
-}
-
-// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
-type IterateOverObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
- // Handler function executed upon objects in db.
- Handler func(context.Context, *objectcore.Info) error
-}
-
-// CountAliveObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
-type CountAliveObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
-}
-
// ListWithCursor lists physical objects available in metabase starting from
-// cursor. Includes objects of all types. Does not include inhumed and expired
-// objects.
+// cursor. Includes objects of all types. Does not include inhumed objects.
// Use cursor value from response for consecutive requests.
//
// Returns ErrEndOfListing if there are no more objects to return or count
@@ -139,12 +113,11 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
var containerID cid.ID
var offset []byte
- bc := newBucketCache()
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
rawAddr := make([]byte, cidSize, addressKeySize)
- currEpoch := db.epochState.CurrentEpoch()
-
loop:
for ; name != nil; name, _ = c.Next() {
cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name)
@@ -168,8 +141,8 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
- result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
- result, count, cursor, threshold, currEpoch)
+ result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
+ result, count, cursor, threshold)
if err != nil {
return nil, nil, err
}
@@ -187,7 +160,8 @@ loop:
if offset != nil {
// new slice is much faster but less memory efficient
// we need to copy, because offset exists during bbolt tx
- cursor.inBucketOffset = bytes.Clone(offset)
+ cursor.inBucketOffset = make([]byte, len(offset))
+ copy(cursor.inBucketOffset, offset)
}
if len(result) == 0 {
@@ -196,29 +170,29 @@ loop:
// new slice is much faster but less memory efficient
// we need to copy, because bucketName exists during bbolt tx
- cursor.bucketName = bytes.Clone(bucketName)
+ cursor.bucketName = make([]byte, len(bucketName))
+ copy(cursor.bucketName, bucketName)
return result, cursor, nil
}
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
-func selectNFromBucket(
- bc *bucketCache,
- bkt *bbolt.Bucket, // main bucket
+func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
+ graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard and garbage buckets
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
to []objectcore.Info, // listing result
limit int, // stop listing at `limit` items in result
cursor *Cursor, // start from cursor object
threshold bool, // ignore cursor and start immediately
- currEpoch uint64,
) ([]objectcore.Info, []byte, *Cursor, error) {
if cursor == nil {
cursor = new(Cursor)
}
+ count := len(to)
c := bkt.Cursor()
k, v := c.First()
@@ -230,7 +204,7 @@ func selectNFromBucket(
}
for ; k != nil; k, v = c.Next() {
- if len(to) >= limit {
+ if count >= limit {
break
}
@@ -240,25 +214,17 @@ func selectNFromBucket(
}
offset = k
- graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
- garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
- var o objectSDK.Object
- if err := o.Unmarshal(v); err != nil {
- return nil, nil, nil, err
- }
-
- expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
- if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
- continue
- }
-
var isLinkingObj bool
var ecInfo *objectcore.ECInfo
if objType == objectSDK.TypeRegular {
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return nil, nil, nil, err
+ }
isLinkingObj = isLinkObject(&o)
ecHeader := o.ECHeader()
if ecHeader != nil {
@@ -274,6 +240,7 @@ func selectNFromBucket(
a.SetContainer(cnt)
a.SetObject(obj)
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
+ count++
}
return to, offset, cursor, nil
@@ -292,211 +259,3 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte)
return rawID, name[0]
}
-
-// IterateOverContainers lists physical containers available in metabase starting from first.
-func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateOverContainers(ctx, tx, prm)
- })
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error {
- var containerID cid.ID
- for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} {
- c := tx.Cursor()
- for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() {
- cidRaw, _ := parseContainerIDWithPrefix(&containerID, name)
- if cidRaw == nil {
- continue
- }
- var cnt cid.ID
- copy(cnt[:], containerID[:])
- var objType objectSDK.Type
- switch prefix[0] {
- case primaryPrefix:
- objType = objectSDK.TypeRegular
- case lockersPrefix:
- objType = objectSDK.TypeLock
- case tombstonePrefix:
- objType = objectSDK.TypeTombstone
- default:
- continue
- }
- err := prm.Handler(ctx, objType, cnt)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// IterateOverObjectsInContainer iterate over physical objects available in metabase starting from first.
-func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateOverObjectsInContainer(ctx, tx, prm)
- })
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error {
- var prefix byte
- switch prm.ObjectType {
- case objectSDK.TypeRegular:
- prefix = primaryPrefix
- case objectSDK.TypeLock:
- prefix = lockersPrefix
- case objectSDK.TypeTombstone:
- prefix = tombstonePrefix
- default:
- return nil
- }
- bucketName := []byte{prefix}
- bucketName = append(bucketName, prm.ContainerID[:]...)
-
- bkt := tx.Bucket(bucketName)
- if bkt == nil {
- return nil
- }
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
- c := bkt.Cursor()
- k, v := c.First()
-
- for ; k != nil; k, v = c.Next() {
- var obj oid.ID
- if err := obj.Decode(k); err != nil {
- break
- }
-
- if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
- continue
- }
-
- var isLinkingObj bool
- var ecInfo *objectcore.ECInfo
- if prm.ObjectType == objectSDK.TypeRegular {
- var o objectSDK.Object
- if err := o.Unmarshal(v); err != nil {
- return err
- }
- isLinkingObj = isLinkObject(&o)
- ecHeader := o.ECHeader()
- if ecHeader != nil {
- ecInfo = &objectcore.ECInfo{
- ParentID: ecHeader.Parent(),
- Index: ecHeader.Index(),
- Total: ecHeader.Total(),
- }
- }
- }
-
- var a oid.Address
- a.SetContainer(prm.ContainerID)
- a.SetObject(obj)
- objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
- err := prm.Handler(ctx, &objInfo)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage.
-func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return 0, ErrDegradedMode
- }
-
- var prefix byte
- switch prm.ObjectType {
- case objectSDK.TypeRegular:
- prefix = primaryPrefix
- case objectSDK.TypeLock:
- prefix = lockersPrefix
- case objectSDK.TypeTombstone:
- prefix = tombstonePrefix
- default:
- return 0, nil
- }
- bucketName := []byte{prefix}
- bucketName = append(bucketName, prm.ContainerID[:]...)
- var count uint64
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(bucketName)
- if bkt == nil {
- return nil
- }
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
- c := bkt.Cursor()
- k, _ := c.First()
- for ; k != nil; k, _ = c.Next() {
- if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
- continue
- }
- count++
- }
- return nil
- })
- success = err == nil
- return count, metaerr.Wrap(err)
-}
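
The make-and-copy pattern restored in the cursor hunks above is required because byte slices handed out by bbolt cursors alias the memory-mapped database pages and are only valid until the transaction ends. A minimal illustration of the rule (bucket name is made up):

var saved []byte
err := db.View(func(tx *bbolt.Tx) error {
	bkt := tx.Bucket([]byte("bkt"))
	if bkt == nil {
		return nil
	}
	k, _ := bkt.Cursor().First()
	// Wrong: `saved = k` would let saved point into pages owned by the tx.
	saved = make([]byte, len(k))
	copy(saved, k) // safe: saved now outlives the transaction
	return nil
})
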
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index 02985991c..6207497b1 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -3,17 +3,13 @@ package meta_test
import (
"context"
"errors"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
@@ -21,8 +17,6 @@ import (
func BenchmarkListWithCursor(b *testing.B) {
db := listWithCursorPrepareDB(b)
- defer func() { require.NoError(b, db.Close(context.Background())) }()
-
b.Run("1 item", func(b *testing.B) {
benchmarkListWithCursor(b, db, 1)
})
@@ -38,6 +32,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
NoSync: true,
})) // faster single-thread generation
+ defer func() { require.NoError(b, db.Close()) }()
obj := testutil.GenerateObject()
for i := range 100_000 { // should be a multiple of all batch sizes
@@ -59,7 +54,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
- if !errors.Is(err, meta.ErrEndOfListing) {
+ if err != meta.ErrEndOfListing {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)
@@ -74,15 +69,13 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
func TestLisObjectsWithCursor(t *testing.T) {
t.Parallel()
- const (
- currEpoch = 100
- expEpoch = currEpoch - 1
- containers = 5
- total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
- )
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
- db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ const (
+ containers = 5
+ total = containers * 4 // regular + ts + child + lock
+ )
expected := make([]object.Info, 0, total)
@@ -117,7 +110,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
ts := testutil.GenerateObjectWithCID(containerID)
- err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object())
+ err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts))
require.NoError(t, err)
// add one child object (do not include parent into expected)
@@ -132,26 +125,6 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, child)
require.NoError(t, err)
expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
-
- // add expired object (do not include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
- require.NoError(t, metaPut(db, obj, nil))
-
- // add non-expired object (include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
- require.NoError(t, metaPut(db, obj, nil))
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
-
- // add locked expired object (include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- objID := oidtest.ID()
- obj.SetID(objID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
- require.NoError(t, metaPut(db, obj, nil))
- require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
}
t.Run("success with various count", func(t *testing.T) {
@@ -189,7 +162,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
const total = 5
@@ -246,59 +219,3 @@ func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]objec
r, err := db.ListWithCursor(context.Background(), listPrm)
return r.AddressList(), r.Cursor(), err
}
-
-func TestIterateOver(t *testing.T) {
- t.Parallel()
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- const total uint64 = 5
- for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
- var expected []*objectSDK.Object
- // fill metabase with objects
- cid := cidtest.ID()
- for range total {
- obj := testutil.GenerateObjectWithCID(cid)
- obj.SetType(typ)
- err := metaPut(db, obj, nil)
- require.NoError(t, err)
- expected = append(expected, obj)
- }
-
- var metaIter meta.IterateOverObjectsInContainerPrm
- var count uint64
- metaIter.Handler = func(context.Context, *object.Info) error {
- count++
- return nil
- }
- metaIter.ContainerID = cid
- metaIter.ObjectType = typ
- err := db.IterateOverObjectsInContainer(context.Background(), metaIter)
- require.NoError(t, err)
- require.Equal(t, total, count)
-
- var metaCount meta.CountAliveObjectsInContainerPrm
- metaCount.ContainerID = cid
- metaCount.ObjectType = typ
- res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount)
- require.NoError(t, err)
- require.Equal(t, res, total)
-
- err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1]))
- require.NoError(t, err)
-
- res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount)
- require.NoError(t, err)
- require.Equal(t, uint64(3), res)
- }
- var count int
- var metaPrm meta.IterateOverContainersPrm
- metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error {
- count++
- return nil
- }
- err := db.IterateOverContainers(context.Background(), metaPrm)
- require.NoError(t, err)
- require.Equal(t, 3, count)
-}
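
For reference, the paging contract that the benchmark above drives looks roughly like this on the caller side (a sketch; the setter names are assumed from the metabase API, and process is a placeholder):

var cursor *meta.Cursor
for {
	var prm meta.ListPrm
	prm.SetCount(1000)
	prm.SetCursor(cursor)

	res, err := db.ListWithCursor(context.Background(), prm)
	if err != nil {
		if errors.Is(err, meta.ErrEndOfListing) {
			break // listing exhausted
		}
		log.Fatal(err)
	}
	process(res.AddressList())
	cursor = res.Cursor()
}
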
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index f4cb9e53b..732ba426d 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -4,10 +4,8 @@ import (
"bytes"
"context"
"fmt"
- "slices"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -64,7 +62,9 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.
return ErrReadOnlyMode
}
- assert.False(len(locked) == 0, "empty locked list")
+ if len(locked) == 0 {
+ panic("empty locked list")
+ }
err := db.lockInternal(locked, cnr, locker)
success = err == nil
@@ -78,7 +78,7 @@ func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error {
}
key := make([]byte, cidSize)
- return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular {
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
}
@@ -143,7 +143,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
var unlockedObjects []oid.Address
- if err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ if err := db.boltDB.Update(func(tx *bbolt.Tx) error {
for i := range lockers {
unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object())
if err != nil {
@@ -162,11 +162,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
// checks if specified object is locked in the specified container.
func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- return objectLockedWithCache(nil, tx, idCnr, idObj)
-}
-
-func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- bucketLocked := getLockedBucket(bc, tx)
+ bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
key := make([]byte, cidSize)
idCnr.Encode(key)
@@ -180,7 +176,7 @@ func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oi
}
// return `LOCK` id's if specified object is locked in the specified container.
-func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
+func getLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
var lockers []oid.ID
bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
@@ -254,7 +250,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
unlockedObjects = append(unlockedObjects, addr)
} else {
// exclude locker
- keyLockers = slices.Delete(keyLockers, i, i+1)
+ keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
v, err = encodeList(keyLockers)
if err != nil {
@@ -355,20 +351,20 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e
return res, err
}
-// GetLocks return `LOCK` id's if provided object is locked by any `LOCK`. Not found
+// GetLocked return `LOCK` id's if provided object is locked by any `LOCK`. Not found
// object is considered as non-locked.
//
// Returns only non-logical errors related to underlying database.
-func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
+func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("GetLocked", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks",
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocked",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
@@ -381,7 +377,7 @@ func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err
return res, ErrDegradedMode
}
err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res, err = getLocks(tx, addr.Container(), addr.Object())
+ res, err = getLocked(tx, addr.Container(), addr.Object())
return nil
}))
success = err == nil
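
The locker-removal hunk above swaps slices.Delete for the classic append idiom; both drop the element at index i in place, and newer Go releases additionally zero the vacated tail elements in the slices.Delete form. A side-by-side sketch:

package main

import (
	"fmt"
	"slices"
)

// removeAtAppend is the append idiom this patch switches to.
func removeAtAppend(s [][]byte, i int) [][]byte {
	return append(s[:i], s[i+1:]...)
}

// removeAtDelete is the slices.Delete form it replaces.
func removeAtDelete(s [][]byte, i int) [][]byte {
	return slices.Delete(s, i, i+1)
}

func main() {
	a := [][]byte{[]byte("x"), []byte("y"), []byte("z")}
	b := [][]byte{[]byte("x"), []byte("y"), []byte("z")}
	fmt.Println(len(removeAtAppend(a, 1)), len(removeAtDelete(b, 1))) // 2 2
}
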
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 341ff9ad1..62a109b02 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -21,7 +21,7 @@ func TestDB_Lock(t *testing.T) {
cnr := cidtest.ID()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
t.Run("empty locked list", func(t *testing.T) {
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
@@ -73,9 +73,7 @@ func TestDB_Lock(t *testing.T) {
_, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(objAddr.Container())
- inhumePrm.SetTombstoneAddress(tombAddr)
+ inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
@@ -91,9 +89,7 @@ func TestDB_Lock(t *testing.T) {
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- tombAddr = oidtest.Address()
- tombAddr.SetContainer(objAddr.Container())
- inhumePrm.SetTombstoneAddress(tombAddr)
+ inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
})
@@ -107,7 +103,7 @@ func TestDB_Lock(t *testing.T) {
var objLockedErr *apistatus.ObjectLocked
// try to inhume locked object using tombstone
- err := metaInhume(db, objAddr, lockAddr.Object())
+ err := metaInhume(db, objAddr, lockAddr)
require.ErrorAs(t, err, &objLockedErr)
// free locked object
@@ -187,7 +183,7 @@ func TestDB_Lock_Expired(t *testing.T) {
es := &epochState{e: 123}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
// put an object
addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)
@@ -209,7 +205,7 @@ func TestDB_IsLocked(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
// existing and locked objs
diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go
index 7edb96384..2032ed6b2 100644
--- a/pkg/local_object_storage/metabase/mode.go
+++ b/pkg/local_object_storage/metabase/mode.go
@@ -1,7 +1,6 @@
package meta
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -9,7 +8,7 @@ import (
// SetMode sets the metabase mode of operation.
// If the mode assumes no operation metabase, the database is closed.
-func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
+func (db *DB) SetMode(m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
@@ -18,20 +17,20 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
}
if !db.mode.NoMetabase() {
- if err := db.Close(ctx); err != nil {
- return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
if m.NoMetabase() {
db.boltDB = nil
} else {
- err := db.openDB(ctx, m)
+ err := db.openDB(m)
if err == nil && !m.ReadOnly() {
- err = db.Init(ctx)
+ err = db.Init()
}
if err != nil {
- return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go
index 28b42283f..1b9f60055 100644
--- a/pkg/local_object_storage/metabase/mode_test.go
+++ b/pkg/local_object_storage/metabase/mode_test.go
@@ -25,13 +25,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init(context.Background()))
+ require.NoError(t, bdb.Init())
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close(context.Background()))
+ require.NoError(t, bdb.Close())
require.NoError(t, bdb.Open(context.Background(), mode.Degraded))
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init(context.Background()))
+ require.NoError(t, bdb.Init())
require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close(context.Background()))
+ require.NoError(t, bdb.Close())
}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 5e1bbfe9e..ff79a0387 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -9,12 +9,12 @@ import (
"strconv"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -35,8 +35,6 @@ type PutPrm struct {
obj *objectSDK.Object
id []byte
-
- indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
@@ -54,13 +52,10 @@ func (p *PutPrm) SetStorageID(id []byte) {
p.id = id
}
-func (p *PutPrm) SetIndexAttributes(v bool) {
- p.indexAttributes = v
-}
-
var (
- ErrUnknownObjectType = errors.New("unknown object type")
- ErrIncorrectRootObject = errors.New("invalid root object")
+ ErrUnknownObjectType = errors.New("unknown object type")
+ ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
+ ErrIncorrectRootObject = errors.New("invalid root object")
)
// Put saves object header in metabase. Object payload expected to be cut.
@@ -95,12 +90,12 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
var e error
- res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes)
+ res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch)
return e
})
if err == nil {
success = true
- storagelog.Write(ctx, db.log,
+ storagelog.Write(db.log,
storagelog.AddressField(objectCore.AddressOf(prm.obj)),
storagelog.OpField("metabase PUT"))
}
@@ -113,22 +108,15 @@ func (db *DB) put(tx *bbolt.Tx,
id []byte,
si *objectSDK.SplitInfo,
currEpoch uint64,
- indexAttributes bool,
) (PutRes, error) {
cnr, ok := obj.ContainerID()
if !ok {
return PutRes{}, errors.New("missing container in object")
}
- var ecParentAddress oid.Address
- if ecHeader := obj.ECHeader(); ecHeader != nil {
- ecParentAddress.SetContainer(cnr)
- ecParentAddress.SetObject(ecHeader.Parent())
- }
-
isParent := si != nil
- exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch)
+ exists, _, err := db.exists(tx, objectCore.AddressOf(obj), oid.Address{}, currEpoch)
var splitInfoError *objectSDK.SplitInfoError
if errors.As(err, &splitInfoError) {
@@ -141,7 +129,7 @@ func (db *DB) put(tx *bbolt.Tx,
return PutRes{}, db.updateObj(tx, obj, id, si, isParent)
}
- return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes)
+ return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch)
}
func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error {
@@ -164,14 +152,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje
return nil
}
-func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error {
+func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error {
if par := obj.Parent(); par != nil && !isParent { // limit depth by two
parentSI, err := splitInfoFromObject(obj)
if err != nil {
return err
}
- _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes)
+ _, err = db.put(tx, par, id, parentSI, currEpoch)
if err != nil {
return err
}
@@ -179,19 +167,12 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
- return fmt.Errorf("put unique indexes: %w", err)
+ return fmt.Errorf("can't put unique indexes: %w", err)
}
err = updateListIndexes(tx, obj, putListIndexItem)
if err != nil {
- return fmt.Errorf("put list indexes: %w", err)
- }
-
- if indexAttributes {
- err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
- if err != nil {
- return fmt.Errorf("put fake bucket tree indexes: %w", err)
- }
+ return fmt.Errorf("can't put list indexes: %w", err)
}
// update container volume size estimation
@@ -249,7 +230,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad
}
rawObject, err := obj.CutPayload().Marshal()
if err != nil {
- return fmt.Errorf("marshal object header: %w", err)
+ return fmt.Errorf("can't marshal object header: %w", err)
}
return putUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
@@ -400,56 +381,16 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
return nil
}
-var indexedAttributes = map[string]struct{}{
- "S3-Access-Box-CRDT-Name": {},
- objectSDK.AttributeFilePath: {},
-}
-
-// IsAtrributeIndexed returns True if attribute is indexed by metabase.
-func IsAtrributeIndexed(attr string) bool {
- _, found := indexedAttributes[attr]
- return found
-}
-
-func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
- id, _ := obj.ID()
- cnr, _ := obj.ContainerID()
- objKey := objectKey(id, make([]byte, objectKeySize))
-
- key := make([]byte, bucketKeySize)
- var attrs []objectSDK.Attribute
- if obj.ECHeader() != nil {
- attrs = obj.ECHeader().ParentAttributes()
- objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
- } else {
- attrs = obj.Attributes()
- }
-
- // user specified attributes
- for i := range attrs {
- if !IsAtrributeIndexed(attrs[i].Key()) {
- continue
- }
- key = attributeBucketName(cnr, attrs[i].Key(), key)
- err := f(tx, namedBucketItem{
- name: key,
- key: []byte(attrs[i].Value()),
- val: objKey,
- })
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
attributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
attributes = ech.ParentAttributes()
}
for _, attr := range attributes {
+ if attr.Key() == objectV2.SysAttributeExpEpochNeoFS {
+ expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
+ return expEpoch, err == nil
+ }
if attr.Key() == objectV2.SysAttributeExpEpoch {
expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
return expEpoch, err == nil
@@ -474,7 +415,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck
func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
}
data, err := update(bkt.Get(item.key), item.val)
@@ -488,36 +429,22 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil })
}
-func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt, err := createBucketLikelyExists(tx, item.name)
- if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
- }
-
- fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
- if err != nil {
- return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
- }
-
- return fkbtRoot.Put(item.val, zeroValue)
-}
-
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
}
lst, err := decodeList(bkt.Get(item.key))
if err != nil {
- return fmt.Errorf("decode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
}
lst = append(lst, item.val)
encodedLst, err := encodeList(lst)
if err != nil {
- return fmt.Errorf("encode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
}
return bkt.Put(item.key, encodedLst)
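
After the put.go hunk above, expiration lookup accepts both the legacy NeoFS spelling and the current spelling of the system attribute. Condensed, the restored check is equivalent to this sketch (the helper name is ours; the constants are the frostfs-api-go ones used in the hunk):

// expirationEpoch reports the expiration epoch carried by attrs; the second
// result is false when the attribute is absent or its value is malformed.
func expirationEpoch(attrs []objectSDK.Attribute) (uint64, bool) {
	for _, attr := range attrs {
		k := attr.Key()
		if k == objectV2.SysAttributeExpEpochNeoFS || k == objectV2.SysAttributeExpEpoch {
			epoch, err := strconv.ParseUint(attr.Value(), 10, 64)
			return epoch, err == nil
		}
	}
	return 0, false
}
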
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index f37ed4cf2..914f5ef06 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -46,7 +46,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(runtime.NumCPU()))
- defer func() { require.NoError(b, db.Close(context.Background())) }()
+ defer func() { require.NoError(b, db.Close()) }()
// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
b.SetParallelism(1)
@@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(1))
- defer func() { require.NoError(b, db.Close(context.Background())) }()
+ defer func() { require.NoError(b, db.Close()) }()
var index atomic.Int64
index.Store(-1)
objs := prepareObjects(b.N)
@@ -84,7 +84,7 @@ func BenchmarkPut(b *testing.B) {
func TestDB_PutBlobovniczaUpdate(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw1 := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
index 5f0956f0b..993079dce 100644
--- a/pkg/local_object_storage/metabase/reset_test.go
+++ b/pkg/local_object_storage/metabase/reset_test.go
@@ -30,14 +30,14 @@ func TestResetDropsContainerBuckets(t *testing.T) {
)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
for idx := range 100 {
var putPrm PutPrm
putPrm.SetObject(testutil.GenerateObject())
- putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
+ putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
_, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
}
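
The reset_test.go hunk above trades fmt.Appendf (Go 1.19+) for []byte(fmt.Sprintf(...)); the two produce identical bytes, the former merely skipping the intermediate string allocation:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := fmt.Appendf(nil, "0/%d", 7)    // format straight into a byte slice
	b := []byte(fmt.Sprintf("0/%d", 7)) // format into a string, then convert
	fmt.Println(bytes.Equal(a, b))      // true
}
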
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 60da50671..ed43fc41f 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -8,15 +8,17 @@ import (
"strings"
"time"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
type (
@@ -35,9 +37,8 @@ type (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
- useAttributeIndex bool
+ cnr cid.ID
+ filters objectSDK.SearchFilters
}
// SelectRes groups the resulting values of Select operation.
@@ -55,10 +56,6 @@ func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
-func (p *SelectPrm) SetUseAttributeIndex(v bool) {
- p.useAttributeIndex = v
-}
-
// AddressList returns list of addresses of the selected objects.
func (r SelectRes) AddressList() []oid.Address {
return r.addrList
@@ -95,14 +92,14 @@ func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err err
currEpoch := db.epochState.CurrentEpoch()
return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex)
+ res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch)
success = err == nil
return err
}))
}
-func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) {
- group, err := groupFilters(fs, useAttributeIndex)
+func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64) ([]oid.Address, error) {
+ group, err := groupFilters(fs)
if err != nil {
return nil, err
}
@@ -131,7 +128,6 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
res := make([]oid.Address, 0, len(mAddr))
- bc := newBucketCache()
for a, ind := range mAddr {
if ind != expLen {
continue // ignore objects with unmatched fast filters
@@ -146,7 +142,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(id)
- st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
+ st, err := objectStatus(tx, addr, currEpoch)
if err != nil {
return nil, err
}
@@ -154,7 +150,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
continue // ignore removed objects
}
- addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
+ addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
if !match {
continue // ignore objects with unmatched slow filters
}
@@ -222,13 +218,7 @@ func (db *DB) selectFastFilter(
selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum)
- default: // user attribute
- bucketName := attributeBucketName(cnr, f.Header(), bucketName)
- if f.Operation() == objectSDK.MatchNotPresent {
- selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum)
- } else {
- db.selectFromFKBT(tx, bucketName, f, to, fNum)
- }
+ default:
}
}
@@ -238,15 +228,6 @@ var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{
v2object.TypeLock.String(): {bucketNameLockers},
}
-func allBucketNames(cnr cid.ID) (names [][]byte) {
- for _, fns := range mBucketNaming {
- for _, fn := range fns {
- names = append(names, fn(cnr, make([]byte, bucketKeySize)))
- }
- }
- return
-}
-
func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) {
appendNames := func(key string) {
fns, ok := mBucketNaming[key]
@@ -278,76 +259,6 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str
return
}
-func (db *DB) selectFromFKBT(
- tx *bbolt.Tx,
- name []byte, // fkbt root bucket name
- f objectSDK.SearchFilter, // filter for operation and value
- to map[string]int, // resulting cache
- fNum int, // index of filter
-) { //
- matchFunc, ok := db.matchers[f.Operation()]
- if !ok {
- return
- }
-
- fkbtRoot := tx.Bucket(name)
- if fkbtRoot == nil {
- return
- }
-
- _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
- fkbtLeaf := fkbtRoot.Bucket(k)
- if fkbtLeaf == nil {
- return nil
- }
-
- return fkbtLeaf.ForEach(func(k, _ []byte) error {
- markAddressInCache(to, fNum, string(k))
-
- return nil
- })
- })
-}
-
-// selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in
-// resulting cache.
-func selectOutsideFKBT(
- tx *bbolt.Tx,
- incl [][]byte, // buckets
- name []byte, // fkbt root bucket name
- to map[string]int, // resulting cache
- fNum int, // index of filter
-) {
- mExcl := make(map[string]struct{})
-
- bktExcl := tx.Bucket(name)
- if bktExcl != nil {
- _ = bktExcl.ForEachBucket(func(k []byte) error {
- exclBktLeaf := bktExcl.Bucket(k)
- return exclBktLeaf.ForEach(func(k, _ []byte) error {
- mExcl[string(k)] = struct{}{}
-
- return nil
- })
- })
- }
-
- for i := range incl {
- bktIncl := tx.Bucket(incl[i])
- if bktIncl == nil {
- continue
- }
-
- _ = bktIncl.ForEach(func(k, _ []byte) error {
- if _, ok := mExcl[string(k)]; !ok {
- markAddressInCache(to, fNum, string(k))
- }
-
- return nil
- })
- }
-}
-
// selectFromList looks into the index to find the list of addresses to add to
// the resulting cache.
func (db *DB) selectFromList(
@@ -371,17 +282,24 @@ func (db *DB) selectFromList(
case objectSDK.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
+ db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
+ db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))
+
return
}
if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error {
l, err := decodeList(val)
if err != nil {
+ db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
+ zap.String("error", err.Error()),
+ )
+
return err
}
@@ -389,6 +307,10 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
+ db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
+ zap.String("error", err.Error()),
+ )
+
return
}
}
@@ -430,6 +352,10 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
+ db.log.Debug(logs.MetabaseUnknownOperation,
+ zap.Uint32("operation", uint32(f.Operation())),
+ )
+
return
}
@@ -440,25 +366,31 @@ func (db *DB) selectObjectID(
return
}
- _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
+ err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
var id oid.ID
if err := id.Decode(k); err == nil {
appendOID(id)
}
return nil
})
+ if err != nil {
+ db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
+ zap.String("error", err.Error()),
+ )
+ }
}
}
}
// matchSlowFilters returns true if the object header matches all slow filters.
-func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
result := addr
if len(f) == 0 {
return result, true
}
- obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
+ buf := make([]byte, addressKeySize)
+ obj, err := db.get(tx, addr, buf, true, false, currEpoch)
if err != nil {
return result, false
}
@@ -469,26 +401,17 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address,
case v2object.FilterHeaderVersion:
data = []byte(obj.Version().String())
case v2object.FilterHeaderHomomorphicHash:
- if isECChunk {
- return result, false // EC chunk and EC parent hashes are incomparable
- }
cs, _ := obj.PayloadHomomorphicHash()
data = cs.Value()
case v2object.FilterHeaderCreationEpoch:
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.CreationEpoch())
case v2object.FilterHeaderPayloadLength:
- if isECChunk {
- return result, false // EC chunk and EC parent payload lengths are incomparable
- }
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
case v2object.FilterHeaderOwnerID:
data = []byte(obj.OwnerID().EncodeToString())
case v2object.FilterHeaderPayloadHash:
- if isECChunk {
- return result, false // EC chunk and EC parent payload hashes are incomparable
- }
cs, _ := obj.PayloadChecksum()
data = cs.Value()
default: // user attribute
@@ -516,29 +439,6 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address,
return result, true
}
-func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
- buf := make([]byte, addressKeySize)
- obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
- if err != nil {
- var ecInfoError *objectSDK.ECInfoError
- if errors.As(err, &ecInfoError) {
- for _, chunk := range ecInfoError.ECInfo().Chunks {
- var objID oid.ID
- if err = objID.ReadFromV2(chunk.ID); err != nil {
- continue
- }
- addr.SetObject(objID)
- obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
- if err == nil {
- return obj, true, nil
- }
- }
- }
- return nil, false, err
- }
- return obj, false, nil
-}
-
func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
objectAttributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
@@ -555,7 +455,7 @@ func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
// groupFilters divides filters into two groups: fast and slow. Fast filters
// are processed via indexes, while slow filters are processed afterwards by
// unmarshaling object headers.
-func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) {
+func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
res := filterGroup{
fastFilters: make(objectSDK.SearchFilters, 0, len(filters)),
slowFilters: make(objectSDK.SearchFilters, 0, len(filters)),
@@ -566,7 +466,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
case v2object.FilterHeaderContainerID: // support deprecated field
err := res.cnr.DecodeString(filters[i].Value())
if err != nil {
- return filterGroup{}, fmt.Errorf("parse container id: %w", err)
+ return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
}
res.withCnrFilter = true
@@ -580,11 +480,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
v2object.FilterPropertyPhy:
res.fastFilters = append(res.fastFilters, filters[i])
default:
- if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) {
- res.fastFilters = append(res.fastFilters, filters[i])
- } else {
- res.slowFilters = append(res.slowFilters, filters[i])
- }
+ res.slowFilters = append(res.slowFilters, filters[i])
}
}
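
With the attribute-index toggle reverted, every user-attribute filter now takes the slow path: the object header is loaded and matched after the fast, index-backed filters. For reference, a minimal sketch of a Select call against the reverted API (the container, filter key, and value are illustrative; `db` is an opened *meta.DB):

	fs := objectSDK.SearchFilters{}
	fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual) // user attribute: evaluated as a slow filter

	var prm meta.SelectPrm
	prm.SetContainerID(cnr)
	prm.SetFilters(fs)

	res, err := db.Select(ctx, prm)
	if err != nil {
		return err
	}
	for _, addr := range res.AddressList() {
		// process the addresses of the selected objects
		_ = addr
	}
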
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index ce2156d2e..6469bbdbc 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -7,10 +7,10 @@ import (
"strconv"
"testing"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -26,19 +26,9 @@ import (
func TestDB_SelectUserAttributes(t *testing.T) {
t.Parallel()
- t.Run("with_index", func(t *testing.T) {
- testSelectUserAttributes(t, true)
- })
- t.Run("without_index", func(t *testing.T) {
- testSelectUserAttributes(t, false)
- })
-}
-
-func testSelectUserAttributes(t *testing.T, index bool) {
- t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -46,52 +36,44 @@ func testSelectUserAttributes(t *testing.T, index bool) {
testutil.AddAttribute(raw1, "foo", "bar")
testutil.AddAttribute(raw1, "x", "y")
- var putPrm meta.PutPrm
- putPrm.SetIndexAttributes(index)
- putPrm.SetObject(raw1)
- _, err := db.Put(context.Background(), putPrm)
+ err := putBig(db, raw1)
require.NoError(t, err)
raw2 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw2, "foo", "bar")
testutil.AddAttribute(raw2, "x", "z")
- putPrm.SetObject(raw2)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw2)
require.NoError(t, err)
raw3 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw3, "a", "b")
- putPrm.SetObject(raw3)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw3)
require.NoError(t, err)
raw4 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2")
+ testutil.AddAttribute(raw4, "path", "test/1/2")
- putPrm.SetObject(raw4)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw4)
require.NoError(t, err)
raw5 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3")
+ testutil.AddAttribute(raw5, "path", "test/1/3")
- putPrm.SetObject(raw5)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw5)
require.NoError(t, err)
raw6 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3")
+ testutil.AddAttribute(raw6, "path", "test/2/3")
- putPrm.SetObject(raw6)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw6)
require.NoError(t, err)
raw7 := testutil.GenerateObjectWithCID(cnr)
var attr objectSDK.Attribute
- attr.SetKey(objectSDK.AttributeFilePath)
- attr.SetValue("/test/3/4")
+ attr.SetKey("path")
+ attr.SetValue("test/3/4")
attrs := raw7.Attributes()
attrs = append(attrs, attr)
ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
@@ -99,39 +81,37 @@ func testSelectUserAttributes(t *testing.T, index bool) {
Attributes: attrs,
}, 0, 3, []byte{}, 0)
raw7.SetECHeader(ech)
- putPrm.SetObject(raw7)
- _, err = db.Put(context.Background(), putPrm)
- require.NoError(t, err)
+ require.NoError(t, putBig(db, raw7))
var raw7Parent oid.Address
raw7Parent.SetContainer(cnr)
raw7Parent.SetObject(ech.Parent())
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1))
+ testSelect(t, db, cnr, fs, object.AddressOf(raw1))
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual)
- testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2))
+ testSelect(t, db, cnr, fs, object.AddressOf(raw2))
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "b", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3))
+ testSelect(t, db, cnr, fs, object.AddressOf(raw3))
fs = objectSDK.SearchFilters{}
fs.AddFilter("c", "d", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index)
+ testSelect(t, db, cnr, fs)
fs = objectSDK.SearchFilters{}
fs.AddFilter("foo", "", objectSDK.MatchNotPresent)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
@@ -141,7 +121,7 @@ func testSelectUserAttributes(t *testing.T, index bool) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "", objectSDK.MatchNotPresent)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw4),
@@ -151,7 +131,7 @@ func testSelectUserAttributes(t *testing.T, index bool) {
)
fs = objectSDK.SearchFilters{}
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -163,7 +143,7 @@ func testSelectUserAttributes(t *testing.T, index bool) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("key", "", objectSDK.MatchNotPresent)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -174,8 +154,8 @@ func testSelectUserAttributes(t *testing.T, index bool) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix)
- testSelect2(t, db, cnr, fs, index,
+ fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
@@ -183,15 +163,15 @@ func testSelectUserAttributes(t *testing.T, index bool) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix)
- testSelect2(t, db, cnr, fs, index,
+ fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw4),
object.AddressOf(raw5),
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index,
+ fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual)
+ testSelect(t, db, cnr, fs,
raw7Parent,
)
}
@@ -200,7 +180,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -354,7 +334,7 @@ func TestDB_SelectInhume(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -372,7 +352,11 @@ func TestDB_SelectInhume(t *testing.T) {
object.AddressOf(raw2),
)
- err = metaInhume(db, object.AddressOf(raw2), oidtest.ID())
+ var tombstone oid.Address
+ tombstone.SetContainer(cnr)
+ tombstone.SetObject(oidtest.ID())
+
+ err = metaInhume(db, object.AddressOf(raw2), tombstone)
require.NoError(t, err)
fs = objectSDK.SearchFilters{}
@@ -385,7 +369,7 @@ func TestDB_SelectPayloadHash(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -456,7 +440,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -564,7 +548,7 @@ func TestDB_SelectObjectID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -680,7 +664,7 @@ func TestDB_SelectOwnerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -782,54 +766,6 @@ func TestDB_SelectOwnerID(t *testing.T) {
})
}
-func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
- t.Parallel()
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- ecChunk1 := oidtest.ID()
- ecChunk2 := oidtest.ID()
- ecParent := oidtest.ID()
- var ecParentAddr oid.Address
- ecParentAddr.SetContainer(cnr)
- ecParentAddr.SetObject(ecParent)
- var ecParentAttr []objectSDK.Attribute
- var attr objectSDK.Attribute
- attr.SetKey(objectSDK.AttributeFilePath)
- attr.SetValue("/1/2/3")
- ecParentAttr = append(ecParentAttr, attr)
-
- chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetID(ecChunk1)
- chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
- chunkObj.SetPayloadSize(uint64(5))
- chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
-
- chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetID(ecChunk2)
- chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
- chunkObj2.SetPayloadSize(uint64(10))
- chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0))
-
- // put object with EC
-
- var prm meta.PutPrm
- prm.SetObject(chunkObj)
- _, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- prm.SetObject(chunkObj2)
- _, err = db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- fs := objectSDK.SearchFilters{}
- fs.AddRootFilter()
- fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs, ecParentAddr)
-}
-
type testTarget struct {
objects []*objectSDK.Object
}
@@ -865,7 +801,7 @@ func TestDB_RawHead_SplitInfo(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -906,7 +842,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
t.Run("first last, then linking", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
require.NoError(t, metaPut(db, lastPart, nil))
require.NoError(t, metaPut(db, linking, nil))
@@ -930,7 +866,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("first linking, then last", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
require.NoError(t, metaPut(db, linking, nil))
require.NoError(t, metaPut(db, lastPart, nil))
@@ -954,7 +890,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("only last part", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
require.NoError(t, metaPut(db, lastPart, nil))
@@ -984,7 +920,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -1052,7 +988,7 @@ func TestDB_SelectSplitID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -1109,7 +1045,7 @@ func TestDB_SelectContainerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
@@ -1157,7 +1093,7 @@ func TestDB_SelectContainerID(t *testing.T) {
func BenchmarkSelect(b *testing.B) {
const objCount = 1000
db := newDB(b)
- defer func() { require.NoError(b, db.Close(context.Background())) }()
+ defer func() { require.NoError(b, db.Close()) }()
cid := cidtest.ID()
@@ -1199,25 +1135,23 @@ func TestExpiredObjects(t *testing.T) {
t.Parallel()
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
cidExp, _ := exp.ContainerID()
cidNonExp, _ := nonExp.ContainerID()
- objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false)
+ objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{})
require.NoError(t, err)
require.Empty(t, objs) // expired object should not be returned
- objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false)
+ objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{})
require.NoError(t, err)
require.NotEmpty(t, objs)
})
}
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) {
- b.ReportAllocs()
-
var prm meta.SelectPrm
prm.SetContainerID(cid)
prm.SetFilters(fs)
@@ -1233,11 +1167,10 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
}
}
-func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) {
+func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var prm meta.SelectPrm
prm.SetFilters(fs)
prm.SetContainerID(cnr)
- prm.SetUseAttributeIndex(useAttributeIndex)
res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go
index 72618b1a0..88446494e 100644
--- a/pkg/local_object_storage/metabase/shard_id.go
+++ b/pkg/local_object_storage/metabase/shard_id.go
@@ -2,7 +2,6 @@ package meta
import (
"bytes"
- "context"
"errors"
"fmt"
"os"
@@ -22,7 +21,7 @@ var (
// If id is missing, returns nil, nil.
//
// GetShardID does not report any metrics.
-func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) {
+func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -31,14 +30,14 @@ func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error
return nil, nil
}
- if err := db.openDB(ctx, mode); err != nil {
- return nil, fmt.Errorf("open metabase: %w", err)
+ if err := db.openDB(mode); err != nil {
+ return nil, fmt.Errorf("failed to open metabase: %w", err)
}
id, err := db.readShardID()
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
}
return id, metaerr.Wrap(err)
@@ -60,7 +59,7 @@ func (db *DB) readShardID() ([]byte, error) {
// SetShardID sets metabase operation mode
// and writes shard id to db.
-func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error {
+func (db *DB) SetShardID(id []byte, mode metamode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
db.mode = mode
@@ -69,8 +68,8 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
return ErrReadOnlyMode
}
- if err := db.openDB(ctx, mode); err != nil {
- return fmt.Errorf("open metabase: %w", err)
+ if err := db.openDB(mode); err != nil {
+ return fmt.Errorf("failed to open metabase: %w", err)
}
err := db.writeShardID(id)
@@ -79,7 +78,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
}
if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+ err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
}
return metaerr.Wrap(err)
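
Both shard-ID accessors lose their context parameter in this revert; each opens the metabase, touches the shard-info record, and closes it again synchronously. A minimal usage sketch (the `metamode` import alias follows this file; `newID` is an illustrative value):

	id, err := db.GetShardID(metamode.ReadOnly)
	if err != nil {
		return err
	}
	if id == nil { // no shard ID recorded yet
		if err := db.SetShardID(newID, metamode.ReadWrite); err != nil {
			return err
		}
	}
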
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
index 8f2376503..6d620b41a 100644
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ b/pkg/local_object_storage/metabase/storage_id.go
@@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns the storage descriptor for objects from the blobstor.
// It is stored together with the object and makes get/delete operations faster.
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) {
+func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
var (
startedAt = time.Now()
success = false
@@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, er
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
- var res StorageIDRes
if db.mode.NoMetabase() {
return res, ErrDegradedMode
}
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- res.id = db.storageID(tx, prm.addr)
- return nil
+ err = db.boltDB.View(func(tx *bbolt.Tx) error {
+ res.id, err = db.storageID(tx, prm.addr)
+
+ return err
})
success = err == nil
return res, metaerr.Wrap(err)
}
-func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte {
+func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) {
key := make([]byte, bucketKeySize)
smallBucket := tx.Bucket(smallBucketName(addr.Container(), key))
if smallBucket == nil {
- return nil
+ return nil, nil
}
storageID := smallBucket.Get(objectKey(addr.Object(), key))
if storageID == nil {
- return nil
+ return nil, nil
}
- return bytes.Clone(storageID)
+ return bytes.Clone(storageID), nil
}
// UpdateStorageIDPrm groups the parameters of UpdateStorageID operation.
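
After this change the internal storageID helper reports an error alongside the descriptor, and StorageID propagates it out of the View transaction. A sketch of a read, assuming the SetAddress setter on StorageIDPrm as used elsewhere in this package:

	var prm meta.StorageIDPrm
	prm.SetAddress(addr)

	res, err := db.StorageID(ctx, prm)
	if err != nil {
		return err
	}
	id := res.StorageID() // nil when the object has no small-storage descriptor
	_ = id
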
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index fef680159..aaf6480ab 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -15,7 +15,7 @@ func TestDB_StorageID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
raw1 := testutil.GenerateObject()
raw2 := testutil.GenerateObject()
@@ -43,7 +43,7 @@ func TestDB_StorageID(t *testing.T) {
cnrID, ok := deleted.ContainerID()
require.True(t, ok)
ts := testutil.GenerateObjectWithCID(cnrID)
- require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object()))
+ require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts)))
// check StorageID for object without storageID
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
@@ -79,7 +79,7 @@ func TestPutWritecacheDataRace(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
+ defer func() { require.NoError(t, db.Close()) }()
putStorageID := []byte{1, 2, 3}
wcStorageID := []byte{1, 2, 3, 4, 5}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index 4948f3424..f677dcf8e 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/binary"
- "encoding/hex"
"errors"
"fmt"
"os"
@@ -12,8 +11,7 @@ import (
"sync/atomic"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -27,15 +25,15 @@ const (
upgradeTimeout = 1 * time.Second
)
-var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{
+var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
2: upgradeFromV2ToV3,
- 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error {
+ 3: func(_ context.Context, _ *bbolt.DB, log func(a ...any)) error {
log("metabase already upgraded")
return nil
},
}
-func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error {
+func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
if _, err := os.Stat(path); err != nil {
return fmt.Errorf("check metabase existence: %w", err)
}
@@ -63,7 +61,7 @@ func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoPr
}); err != nil {
return fmt.Errorf("set upgrade key %w", err)
}
- if err := updater(ctx, db, cs, log); err != nil {
+ if err := updater(ctx, db, log); err != nil {
return fmt.Errorf("update metabase schema: %w", err)
}
if err := db.Update(func(tx *bbolt.Tx) error {
@@ -95,7 +93,7 @@ func compactDB(db *bbolt.DB) error {
NoSync: true,
})
if err != nil {
- return fmt.Errorf("open new metabase to compact: %w", err)
+ return fmt.Errorf("can't open new metabase to compact: %w", err)
}
if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
@@ -115,21 +113,17 @@ func compactDB(db *bbolt.DB) error {
return nil
}
-func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
+func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
return err
}
- eg, ctx := errgroup.WithContext(ctx)
- eg.Go(func() error {
- return dropUserAttributes(ctx, db, cs, log)
- })
- eg.Go(func() error {
- return dropOwnerIDIndex(ctx, db, log)
- })
- eg.Go(func() error {
- return dropPayloadChecksumIndex(ctx, db, log)
- })
- if err := eg.Wait(); err != nil {
+ if err := dropUserAttributes(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropOwnerIDIndex(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropPayloadChecksumIndex(ctx, db, log); err != nil {
return err
}
return db.Update(func(tx *bbolt.Tx) error {
@@ -258,7 +252,7 @@ func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, i
continue
}
attributeKey := string(attrKey[1+cidSize:])
- if attributeKey != objectV2.SysAttributeExpEpoch {
+ if attributeKey != objectV2.SysAttributeExpEpochNeoFS && attributeKey != objectV2.SysAttributeExpEpoch {
continue
}
var containerID cid.ID
@@ -292,7 +286,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
}
expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
if err != nil {
- return fmt.Errorf("parse expiration epoch: %w", err)
+ return fmt.Errorf("could not parse expiration epoch: %w", err)
}
expirationEpochBucket := b.Bucket(attrValue)
attrKeyValueC := expirationEpochBucket.Cursor()
@@ -329,223 +323,10 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
return nil
}
-func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
- log("deleting user attribute buckets...")
- const batch = 1000
- prefix := []byte{userAttributePrefix}
- last := prefix
- var count uint64
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- var keys [][]byte
- if err := db.View(func(tx *bbolt.Tx) error {
- c := tx.Cursor()
- for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
- if bytes.Equal(last, k) {
- continue
- }
- keys = append(keys, bytes.Clone(k))
- }
- return nil
- }); err != nil {
- log("deleting user attribute buckets completed with an error:", err)
- return err
- }
- if len(keys) == 0 {
- log("deleting user attribute buckets completed successfully, deleted", count, "buckets")
- return nil
- }
- last = keys[len(keys)-1]
- cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys)
- if err != nil {
- log("deleting user attribute buckets completed with an error:", err)
- return err
- }
- count += cnt
- cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys)
- if err != nil {
- log("deleting user attribute buckets completed with an error:", err)
- return err
- }
- count += cnt
- log("deleted", count, "user attribute buckets")
- }
-}
-
-func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
- keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs)
- if err != nil {
- return 0, fmt.Errorf("select non indexed user attributes: %w", err)
- }
- if err := db.Batch(func(tx *bbolt.Tx) error {
- for _, k := range keysToDrop {
- if err := tx.DeleteBucket(k); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- return 0, fmt.Errorf("drop non indexed user attributes: %w", err)
- }
- return uint64(len(keysToDrop)), nil
-}
-
-func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
- var keysToDrop [][]byte
- for _, key := range keys {
- attr, ok := attributeFromAttributeBucket(key)
- if !ok {
- return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
- }
- if !IsAtrributeIndexed(attr) {
- keysToDrop = append(keysToDrop, key)
- continue
- }
- contID, ok := cidFromAttributeBucket(key)
- if !ok {
- return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
- }
- info, err := cs.Info(ctx, contID)
- if err != nil {
- return nil, err
- }
- if info.Removed || !info.Indexed {
- keysToDrop = append(keysToDrop, key)
- }
- }
- return keysToDrop, nil
-}
-
-func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) {
- var dropBuckets [][]byte
- for _, key := range keys {
- select {
- case <-ctx.Done():
- return 0, ctx.Err()
- default:
- }
-
- if err := dropEmptyNestedBuckets(ctx, db, key); err != nil {
- return 0, err
- }
-
- empty, exists, err := bucketIsEmpty(db, key)
- if err != nil {
- return 0, err
- }
- if empty && exists {
- dropBuckets = append(dropBuckets, key)
- }
- }
- if len(dropBuckets) == 0 {
- return 0, nil
- }
- if err := db.Batch(func(tx *bbolt.Tx) error {
- for _, key := range dropBuckets {
- if err := tx.DeleteBucket(key); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- return 0, fmt.Errorf("drop empty user attributes buckets: %w", err)
- }
- return uint64(len(dropBuckets)), nil
-}
-
-func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) {
- var empty bool
- var exists bool
- if err := db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(bucketKey)
- if b == nil {
- return nil
- }
- exists = true
- empty = !hasAnyItem(b)
- return nil
- }); err != nil {
- return false, false, fmt.Errorf("bucket empty check: %w", err)
- }
- return empty, exists, nil
-}
-
-func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error {
- var last []byte
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var dropBuckets [][]byte
- var err error
- dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last)
- if err != nil {
- return fmt.Errorf("select empty nested buckets: %w", err)
- }
- if len(dropBuckets) == 0 {
- return nil
- }
-
- if err := db.Batch(func(tx *bbolt.Tx) error {
- rootBucket := tx.Bucket(rootBucketKey)
- if rootBucket == nil {
- return nil
- }
- for _, sb := range dropBuckets {
- if err := rootBucket.DeleteBucket(sb); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- return fmt.Errorf("drop empty nested buckets: %w", err)
- }
- }
-}
-
-func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) {
- const batchSize = 1000
- var result [][]byte
- if err := db.View(func(tx *bbolt.Tx) error {
- rootBucket := tx.Bucket(rootBucketKey)
- if rootBucket == nil {
- return nil
- }
- c := rootBucket.Cursor()
- for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if bytes.Equal(last, k) {
- continue
- }
- last = bytes.Clone(k)
- if v != nil { // record
- continue
- }
- nestedBucket := rootBucket.Bucket(k)
- if nestedBucket == nil {
- continue
- }
- if !hasAnyItem(nestedBucket) {
- result = append(result, bytes.Clone(k))
- }
- }
- return nil
- }); err != nil {
- return nil, nil, err
- }
- return result, last, nil
+func dropUserAttributes(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{userAttributePrefix}, func(a ...any) {
+ log(append([]any{"user attributes:"}, a...)...)
+ })
}
func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
@@ -585,7 +366,7 @@ func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log f
log("deleting buckets completed successfully, deleted", count, "buckets")
return nil
}
- if err := db.Batch(func(tx *bbolt.Tx) error {
+ if err := db.Update(func(tx *bbolt.Tx) error {
for _, k := range keys {
if err := tx.DeleteBucket(k); err != nil {
return err
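
The Upgrade entry point no longer takes a container.InfoProvider: with attribute indexing gone, the v2-to-v3 migration drops every user-attribute bucket by prefix instead of consulting per-container settings. A minimal invocation sketch (the path and logger are illustrative):

	err := meta.Upgrade(ctx, "/srv/frostfs/meta/shard.db", true /* compact */, func(a ...any) {
		log.Println(a...)
	})
	if err != nil {
		return fmt.Errorf("upgrade metabase: %w", err)
	}
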
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index c90de4dd6..3797de0a4 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -11,12 +11,11 @@ import (
"testing"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -34,21 +33,15 @@ func TestUpgradeV2ToV3(t *testing.T) {
}()
db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion)
- require.NoError(t, db.Close(context.Background()))
- require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
+ require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
+ require.NoError(t, db.Close())
+ require.NoError(t, Upgrade(context.Background(), path, true, t.Log))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
fmt.Println()
}
-type testContainerInfoProvider struct{}
-
-func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) {
- return container.Info{}, nil
-}
-
func createTempCopy(t *testing.T, path string) string {
src, err := os.Open(path)
require.NoError(t, err)
@@ -87,7 +80,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
db.boltDB.AllocSize = allocSize
db.boltDB.NoSync = true
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
containers := make([]cid.ID, containersCount)
for i := range containers {
containers[i] = cidtest.ID()
@@ -102,7 +95,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
@@ -113,7 +106,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects generated")
+ db.log.Info("simple objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// complex objects
@@ -125,8 +118,8 @@ func TestGenerateMetabaseFile(t *testing.T) {
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
- testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
- testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(child, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(parent, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
@@ -137,7 +130,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "complex objects generated")
+ db.log.Info("complex objects generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects deleted by gc marks
@@ -145,7 +138,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -159,7 +152,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects deleted by gc marks generated")
+ db.log.Info("simple objects deleted by gc marks generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(10000)
// simple objects deleted by tombstones
@@ -167,7 +160,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -189,7 +182,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects deleted by tombstones generated")
+ db.log.Info("simple objects deleted by tombstones generated")
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects locked by locks
@@ -197,7 +190,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -216,7 +209,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
})
}
require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects locked by locks generated")
+ db.log.Info("simple objects locked by locks generated")
require.NoError(t, db.boltDB.Sync())
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 4ad83332b..eef7210dc 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -100,6 +99,7 @@ const (
// userAttributePrefix was used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
+ // Removed in version 3.
userAttributePrefix
// ====================
@@ -170,28 +170,6 @@ func smallBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, smallPrefix, key)
}
-// attributeBucketName returns _.
-func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
- key[0] = userAttributePrefix
- cnr.Encode(key[1:])
- return append(key[:bucketKeySize], attributeKey...)
-}
-
-func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) {
- if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
- return cid.ID{}, false
- }
- var result cid.ID
- return result, result.Decode(bucketName[1:bucketKeySize]) == nil
-}
-
-func attributeFromAttributeBucket(bucketName []byte) (string, bool) {
- if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
- return "", false
- }
- return string(bucketName[bucketKeySize:]), true
-}
-
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
@@ -232,11 +210,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
epoch := binary.BigEndian.Uint64(key)
var cnr cid.ID
if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
}
var obj oid.ID
if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
}
return epoch, cnr, obj, nil
}
@@ -279,7 +257,9 @@ func objectKey(obj oid.ID, key []byte) []byte {
//
// firstIrregularObjectType(tx, cnr, obj) can be used to get the type of a single object.
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type {
- assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType")
+ if len(objs) == 0 {
+ panic("empty object list in firstIrregularObjectType")
+ }
var keys [2][1 + cidSize]byte
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index fbc0f1ad9..048bb9af6 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
- return fmt.Errorf("create auxiliary bucket: %w", err)
+ return fmt.Errorf("can't create auxiliary bucket: %w", err)
}
return b.Put(versionKey, data)
}
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index b373fb32e..75229a1b4 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -45,25 +45,25 @@ func TestVersion(t *testing.T) {
t.Run("simple", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
t.Run("reopen", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
})
t.Run("old data", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite))
+ require.NoError(t, db.SetShardID([]byte{1, 2, 3, 4}, mode.ReadWrite))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
t.Run("invalid version", func(t *testing.T) {
db := newDB(t)
@@ -71,37 +71,37 @@ func TestVersion(t *testing.T) {
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return updateVersion(tx, version+1)
}))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.Error(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.Error(t, db.Init())
+ require.NoError(t, db.Close())
t.Run("reset", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Reset())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
})
t.Run("incompleted upgrade", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
}))
- require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade)
- require.NoError(t, db.Close(context.Background()))
+ require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close())
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
}))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
})
}
diff --git a/pkg/local_object_storage/pilorama/batch.go b/pkg/local_object_storage/pilorama/batch.go
index 4c5238921..520c6dfb4 100644
--- a/pkg/local_object_storage/pilorama/batch.go
+++ b/pkg/local_object_storage/pilorama/batch.go
@@ -1,9 +1,9 @@
package pilorama
import (
- "cmp"
"encoding/binary"
"slices"
+ "sort"
"sync"
"time"
@@ -48,8 +48,8 @@ func (b *batch) run() {
// Sorting without a mutex is ok, because we append to this slice only if timer is non-nil.
// See (*boltForest).addBatch for details.
- slices.SortFunc(b.operations, func(mi, mj *Move) int {
- return cmp.Compare(mi.Time, mj.Time)
+ sort.Slice(b.operations, func(i, j int) bool {
+ return b.operations[i].Time < b.operations[j].Time
})
b.operations = slices.CompactFunc(b.operations, func(x, y *Move) bool { return x.Time == y.Time })
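
The batch still deduplicates by timestamp after sorting, so the sort.Slice form must order operations exactly as the slices.SortFunc form did. A standalone sketch of the sort-then-compact pattern (the Move values are illustrative):

	ops := []*Move{{Time: 3}, {Time: 1}, {Time: 2}, {Time: 2}}
	sort.Slice(ops, func(i, j int) bool { return ops[i].Time < ops[j].Time })
	ops = slices.CompactFunc(ops, func(x, y *Move) bool { return x.Time == y.Time })
	// ops now holds Times 1, 2, 3: adjacent equal timestamps collapse to a single element
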
diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go
index 3156751f2..22b951a41 100644
--- a/pkg/local_object_storage/pilorama/bench_test.go
+++ b/pkg/local_object_storage/pilorama/bench_test.go
@@ -28,8 +28,8 @@ func BenchmarkCreate(b *testing.B) {
WithPath(filepath.Join(tmpDir, "test.db")),
WithMaxBatchSize(runtime.GOMAXPROCS(0)))
require.NoError(b, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(b, f.Init(context.Background()))
- defer func() { require.NoError(b, f.Close(context.Background())) }()
+ require.NoError(b, f.Init())
+ defer func() { require.NoError(b, f.Close()) }()
b.Cleanup(func() {
require.NoError(b, os.RemoveAll(tmpDir))
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 897b37ea0..e2d69cafa 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"slices"
+ "sort"
"strconv"
"sync"
"time"
@@ -91,7 +92,7 @@ func NewBoltForest(opts ...Option) ForestStorage {
return &b
}
-func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
+func (t *boltForest) SetMode(m mode.Mode) error {
t.modeMtx.Lock()
defer t.modeMtx.Unlock()
@@ -99,14 +100,14 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
return nil
}
- err := t.Close(ctx)
+ err := t.Close()
if err == nil && !m.NoMetabase() {
if err = t.openBolt(m); err == nil {
- err = t.Init(ctx)
+ err = t.Init()
}
}
if err != nil {
- return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+ return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
}
t.mode = m
@@ -128,7 +129,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
readOnly := m.ReadOnly()
err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
+ return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
}
opts := *bbolt.DefaultOptions
@@ -139,7 +140,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
t.db, err = bbolt.Open(t.path, t.perm, &opts)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
+ return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
}
t.db.MaxBatchSize = t.maxBatchSize
@@ -148,7 +149,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
return nil
}
-func (t *boltForest) Init(context.Context) error {
+func (t *boltForest) Init() error {
if t.mode.NoMetabase() || t.db.IsReadOnly() {
return nil
}
@@ -162,7 +163,7 @@ func (t *boltForest) Init(context.Context) error {
})
}
-func (t *boltForest) Close(context.Context) error {
+func (t *boltForest) Close() error {
var err error
if t.db != nil {
err = t.db.Close()
@@ -419,7 +420,10 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri
return err
}
- i, node := t.getPathPrefix(bTree, attr, path)
+ i, node, err := t.getPathPrefix(bTree, attr, path)
+ if err != nil {
+ return err
+ }
ts := t.getLatestTimestamp(bLog, d.Position, d.Size)
lm = make([]Move, len(path)-i+1)
@@ -555,80 +559,6 @@ func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string
return metaerr.Wrap(err)
}
-func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- m, err := t.filterSeen(cnr, treeID, m)
- if err != nil {
- return err
- }
- if len(m) == 0 {
- success = true
- return nil
- }
-
- ch := make(chan error)
- b := &batch{
- forest: t,
- cid: cnr,
- treeID: treeID,
- results: []chan<- error{ch},
- operations: m,
- }
- go func() {
- b.run()
- }()
- err = <-ch
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) {
- t.modeMtx.RLock()
- defer t.modeMtx.RUnlock()
-
- if t.mode.NoMetabase() {
- return nil, ErrDegradedMode
- }
-
- ops := make([]*Move, 0, len(m))
- err := t.db.View(func(tx *bbolt.Tx) error {
- treeRoot := tx.Bucket(bucketName(cnr, treeID))
- if treeRoot == nil {
- ops = m
- return nil
- }
- b := treeRoot.Bucket(logBucket)
- for _, op := range m {
- var logKey [8]byte
- binary.BigEndian.PutUint64(logKey[:], op.Time)
- seen := b.Get(logKey[:]) != nil
- if !seen {
- ops = append(ops, op)
- }
- }
- return nil
- })
- if err != nil {
- return nil, metaerr.Wrap(err)
- }
- return ops, nil
-}
-
// TreeApplyStream should be used with caution: this method locks other write transactions until `source` is closed.
func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error {
var (
@@ -775,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
key, value = c.Prev()
}
- for i := range ms {
+ for i := range len(ms) {
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
// 2. Insert the operation.
@@ -977,7 +907,10 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st
b := treeRoot.Bucket(dataBucket)
- i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
+ i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
+ if err != nil {
+ return err
+ }
if i < len(path)-1 {
return nil
}
@@ -1077,7 +1010,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol
}
// TreeSortedByFilename implements the Forest interface.
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
+func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) {
var (
startedAt = time.Now()
success = false
@@ -1155,24 +1088,19 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
}
if len(res) != 0 {
s := string(findAttr(res[len(res)-1].Meta, AttributeFilename))
- last = NewCursor(s, res[len(res)-1].LastChild())
+ last = &s
}
return res, last, metaerr.Wrap(err)
}
-func sortByFilename(nodes []NodeInfo) {
- slices.SortFunc(nodes, func(a, b NodeInfo) int {
- return bytes.Compare(a.Meta.GetAttr(AttributeFilename), b.Meta.GetAttr(AttributeFilename))
- })
-}
-
-func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo {
+func sortAndCut(result []NodeInfo, last *string) []NodeInfo {
var lastBytes []byte
if last != nil {
- lastBytes = []byte(last.GetFilename())
+ lastBytes = []byte(*last)
}
- sortByFilename(result)
-
+ sort.Slice(result, func(i, j int) bool {
+ return bytes.Compare(result[i].Meta.GetAttr(AttributeFilename), result[j].Meta.GetAttr(AttributeFilename)) == -1
+ })
for i := range result {
if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
return result[i:]
@@ -1234,7 +1162,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f
nodes = nil
length = actualLength + 1
count = 0
- c.Seek(binary.LittleEndian.AppendUint16(prefix, length))
+ c.Seek(append(prefix, byte(length), byte(length>>8)))
c.Prev() // c.Next() will be performed by for loop
}
}
@@ -1354,7 +1282,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
return nil
})
if err != nil {
- return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
+ return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
}
success = true
return ids, nil
@@ -1498,7 +1426,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
var contID cidSDK.ID
if err := contID.Decode(k[:32]); err != nil {
- return fmt.Errorf("decode container ID: %w", err)
+ return fmt.Errorf("failed to decode containerID: %w", err)
}
res.Items = append(res.Items, ContainerIDTreeID{
CID: contID,
@@ -1506,7 +1434,8 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
})
if len(res.Items) == batchSize {
- res.NextPageToken = bytes.Clone(k)
+ res.NextPageToken = make([]byte, len(k))
+ copy(res.NextPageToken, k)
break
}
}
@@ -1519,7 +1448,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
return &res, nil
}
-func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) {
+func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) {
c := bTree.Cursor()
var curNodes []Node
@@ -1542,14 +1471,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin
}
if len(nextNodes) == 0 {
- return i, curNodes
+ return i, curNodes, nil
}
}
- return len(path), nextNodes
+ return len(path), nextNodes, nil
}
-func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) {
+func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) {
c := bTree.Cursor()
var curNode Node
@@ -1569,10 +1498,10 @@ loop:
childKey, value = c.Next()
}
- return i, curNode
+ return i, curNode, nil
}
- return len(path), curNode
+ return len(path), curNode, nil
}
func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
@@ -1582,12 +1511,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
lm.Child = binary.LittleEndian.Uint64(data)
lm.Parent = binary.LittleEndian.Uint64(data[8:])
- return lm.FromBytes(data[16:])
+ return lm.Meta.FromBytes(data[16:])
}
func (t *boltForest) logToBytes(lm *Move) []byte {
w := io.NewBufBinWriter()
- size := 8 + 8 + lm.Size() + 1
+ size := 8 + 8 + lm.Meta.Size() + 1
// if lm.HasOld {
// size += 8 + lm.Old.Meta.Size()
// }
@@ -1595,7 +1524,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
w.Grow(size)
w.WriteU64LE(lm.Child)
w.WriteU64LE(lm.Parent)
- lm.EncodeBinary(w.BinWriter)
+ lm.Meta.EncodeBinary(w.BinWriter)
// w.WriteBool(lm.HasOld)
// if lm.HasOld {
// w.WriteU64LE(lm.Old.Parent)
@@ -1657,7 +1586,7 @@ func internalKeyPrefix(key []byte, k string) []byte {
key = append(key, 'i')
l := len(k)
- key = binary.LittleEndian.AppendUint16(key, uint16(l))
+ key = append(key, byte(l), byte(l>>8))
key = append(key, k...)
return key
}
@@ -1672,10 +1601,14 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte {
key = internalKeyPrefix(key, k)
l := len(v)
- key = binary.LittleEndian.AppendUint16(key, uint16(l))
+ key = append(key, byte(l), byte(l>>8))
key = append(key, v...)
- key = binary.LittleEndian.AppendUint64(key, parent)
- key = binary.LittleEndian.AppendUint64(key, node)
+ var raw [8]byte
+ binary.LittleEndian.PutUint64(raw[:], parent)
+ key = append(key, raw[:]...)
+
+ binary.LittleEndian.PutUint64(raw[:], node)
+ key = append(key, raw[:]...)
return key
}
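
The `internalKeyPrefix`/`internalKey` hunks above swap `binary.LittleEndian.AppendUint16`/`AppendUint64` (added in Go 1.19) for manual byte appends. A minimal standalone sketch, assuming only the standard library, showing that the two spellings produce identical little-endian keys:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	const l = 0x1234

	// Go 1.19+ helper removed by the revert.
	a := binary.LittleEndian.AppendUint16(nil, uint16(l))

	// Manual append used after the revert: low byte first.
	b := append([]byte(nil), byte(l), byte(l>>8))

	fmt.Println(bytes.Equal(a, b)) // true: both encode as [0x34 0x12]
}
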
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index ebfd0bcc0..78503bada 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -1,10 +1,10 @@
package pilorama
import (
+ "bytes"
"context"
"errors"
"fmt"
- "slices"
"sort"
"strings"
@@ -85,7 +85,8 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
s.operations = append(s.operations, op)
}
- mCopy := slices.Clone(m)
+ mCopy := make([]KeyValue, len(m))
+ copy(mCopy, m)
op := s.do(&Move{
Parent: node,
Meta: Meta{
@@ -111,16 +112,7 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o
return s.Apply(op)
}
-func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error {
- for _, op := range ops {
- if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (f *memoryForest) Init(context.Context) error {
+func (f *memoryForest) Init() error {
return nil
}
@@ -128,11 +120,11 @@ func (f *memoryForest) Open(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) SetMode(context.Context, mode.Mode) error {
+func (f *memoryForest) SetMode(mode.Mode) error {
return nil
}
-func (f *memoryForest) Close(context.Context) error {
+func (f *memoryForest) Close() error {
return nil
}
func (f *memoryForest) SetParentID(string) {}
@@ -164,7 +156,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
}
// TreeSortedByFilename implements the Forest interface.
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
+func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -177,7 +169,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
var res []NodeInfo
for _, nodeID := range nodeIDs {
- children := s.getChildren(nodeID)
+ children := s.tree.getChildren(nodeID)
for _, childID := range children {
var found bool
for _, kv := range s.infoMap[childID].Meta.Items {
@@ -200,18 +192,23 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
return nil, start, nil
}
- sortByFilename(res)
+ sort.Slice(res, func(i, j int) bool {
+ return bytes.Compare(res[i].Meta.GetAttr(AttributeFilename), res[j].Meta.GetAttr(AttributeFilename)) == -1
+ })
r := mergeNodeInfos(res)
for i := range r {
- if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() {
- finish := min(len(res), i+count)
+ if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start {
+ finish := i + count
+ if len(res) < finish {
+ finish = len(res)
+ }
last := string(findAttr(r[finish-1].Meta, AttributeFilename))
- return r[i:finish], NewCursor(last, 0), nil
+ return r[i:finish], &last, nil
}
}
last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
- return nil, NewCursor(last, 0), nil
+ return nil, &last, nil
}
// TreeGetChildren implements the Forest interface.
@@ -222,7 +219,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
return nil, ErrTreeNotFound
}
- children := s.getChildren(nodeID)
+ children := s.tree.getChildren(nodeID)
res := make([]NodeInfo, 0, len(children))
for _, childID := range children {
res = append(res, NodeInfo{
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 844084c55..854fe0aad 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -30,7 +30,7 @@ var providers = []struct {
{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
f := NewMemoryForest()
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
return f
}},
{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
@@ -40,7 +40,7 @@ var providers = []struct {
WithMaxBatchSize(1),
}, opts...)...)
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
return f
}},
}
@@ -61,7 +61,7 @@ func TestForest_TreeMove(t *testing.T) {
}
func testForestTreeMove(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -125,7 +125,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
}
func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -247,7 +247,7 @@ func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
}
func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
+ treeAppend := func(t *testing.T, last *string, count int) *string {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -302,7 +302,7 @@ func TestForest_TreeSortedIteration(t *testing.T) {
}
func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
}
var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
+ treeAppend := func(t *testing.T, last *string, count int) *string {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
require.NoError(t, err)
result = append(result, res...)
@@ -361,7 +361,7 @@ func TestForest_TreeSortedFilename(t *testing.T) {
}
func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
const controlAttr = "control_attr"
cid := cidtest.ID()
@@ -453,7 +453,7 @@ func TestForest_TreeDrop(t *testing.T) {
}
func testForestTreeDrop(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
const cidsSize = 3
var cids [cidsSize]cidSDK.ID
@@ -523,7 +523,7 @@ func TestForest_TreeAdd(t *testing.T) {
}
func testForestTreeAdd(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -571,7 +571,7 @@ func TestForest_TreeAddByPath(t *testing.T) {
}
func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -709,7 +709,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
t.Run("add a child, then insert a parent removal", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
@@ -722,7 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
testApply(t, s, 11, 10, meta)
@@ -792,7 +792,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
t.Run("expected", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
for i := range logs {
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
@@ -801,7 +801,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
})
s := constructor(t, WithMaxBatchSize(batchSize))
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
for range batchSize {
@@ -842,7 +842,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
}
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
t.Run("empty log, no panic", func(t *testing.T) {
_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
@@ -883,7 +883,7 @@ func TestForest_TreeExists(t *testing.T) {
func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
actual, err := s.TreeExists(context.Background(), cid, treeID)
@@ -942,7 +942,7 @@ func TestApplyTricky1(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1005,7 +1005,7 @@ func TestApplyTricky2(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1081,7 +1081,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
}
func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
- for i := range uint64(nodeCount) {
+ for i := uint64(0); i < uint64(nodeCount); i++ {
expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
@@ -1115,7 +1115,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close(context.Background())) }()
+ defer func() { require.NoError(t, expected.Close()) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1145,7 +1145,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
wg.Wait()
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close(context.Background()))
+ require.NoError(t, actual.Close())
}
}
@@ -1163,7 +1163,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close(context.Background())) }()
+ defer func() { require.NoError(t, expected.Close()) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1179,7 +1179,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close(context.Background()))
+ require.NoError(t, actual.Close())
}
}
@@ -1197,7 +1197,7 @@ func BenchmarkApplySequential(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close(context.Background())) }()
+ defer func() { require.NoError(b, s.Close()) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1233,7 +1233,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close(context.Background())) }()
+ defer func() { require.NoError(b, s.Close()) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1290,7 +1290,7 @@ func TestTreeGetByPath(t *testing.T) {
}
func testTreeGetByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cid := cidtest.ID()
treeID := "version"
@@ -1369,7 +1369,7 @@ func TestGetTrees(t *testing.T) {
}
func testTreeGetTrees(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ defer func() { require.NoError(t, s.Close()) }()
cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
d := CIDDescriptor{Position: 0, Size: 1}
@@ -1415,7 +1415,7 @@ func TestTreeLastSyncHeight(t *testing.T) {
}
func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
- defer func() { require.NoError(t, f.Close(context.Background())) }()
+ defer func() { require.NoError(t, f.Close()) }()
cnr := cidtest.ID()
treeID := "someTree"
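
The `compareForests` hunk above (and the `TestRefillMetabase` change later in this diff) replaces Go 1.22 range-over-int loops with counted loops. A small sketch of the equivalence, assuming nothing beyond the standard library:

package main

import "fmt"

func main() {
	const n = uint64(3)

	// Go 1.22+ form used before the revert.
	for i := range n {
		fmt.Println("range:", i)
	}

	// Classic form the revert switches back to.
	for i := uint64(0); i < n; i++ {
		fmt.Println("classic:", i)
	}
}
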
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
index b035be1e1..5a00bcf7a 100644
--- a/pkg/local_object_storage/pilorama/heap.go
+++ b/pkg/local_object_storage/pilorama/heap.go
@@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any {
// fixedHeap maintains a fixed number of the smallest elements, starting from some point.
type fixedHeap struct {
- start *Cursor
+ start *string
sorted bool
count int
h *filenameHeap
}
-func newHeap(start *Cursor, count int) *fixedHeap {
+func newHeap(start *string, count int) *fixedHeap {
h := new(filenameHeap)
heap.Init(h)
@@ -50,19 +50,8 @@ func newHeap(start *Cursor, count int) *fixedHeap {
const amortizationMultiplier = 5
func (h *fixedHeap) push(id MultiNode, filename string) bool {
- if h.start != nil {
- if filename < h.start.GetFilename() {
- return false
- } else if filename == h.start.GetFilename() {
- // A tree may have a lot of nodes with the same filename but different versions so that
- // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call
- // with the same filename.
- pos := slices.Index(id, h.start.GetNode())
- if pos == -1 || pos+1 >= len(id) {
- return false
- }
- id = id[pos+1:]
- }
+ if h.start != nil && filename <= *h.start {
+ return false
}
*h.h = append(*h.h, heapInfo{id: id, filename: filename})
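
`fixedHeap` above sits on `filenameHeap`, a `container/heap` implementation, and keeps only a bounded number of the smallest filenames at or after the start cursor. A self-contained sketch of the same smallest-k idea (illustrative only, not the repository's exact amortized implementation):

package main

import (
	"container/heap"
	"fmt"
)

// maxHeap keeps the largest string on top so the root can be evicted
// once the heap grows past the limit.
type maxHeap []string

func (h maxHeap) Len() int           { return len(h) }
func (h maxHeap) Less(i, j int) bool { return h[i] > h[j] }
func (h maxHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *maxHeap) Push(x any) { *h = append(*h, x.(string)) }

func (h *maxHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// kSmallest returns the k lexicographically smallest names strictly after start,
// mirroring the `filename <= *h.start` skip in fixedHeap.push after the revert.
func kSmallest(names []string, start *string, k int) []string {
	h := &maxHeap{}
	for _, n := range names {
		if start != nil && n <= *start {
			continue
		}
		heap.Push(h, n)
		if h.Len() > k {
			heap.Pop(h) // drop the current largest
		}
	}
	out := make([]string, h.Len())
	for i := len(out) - 1; i >= 0; i-- {
		out[i] = heap.Pop(h).(string)
	}
	return out
}

func main() {
	fmt.Println(kSmallest([]string{"c", "a", "d", "b"}, nil, 2)) // [a b]
}
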
diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go
index 28b7faec8..c9f5df3b7 100644
--- a/pkg/local_object_storage/pilorama/inmemory.go
+++ b/pkg/local_object_storage/pilorama/inmemory.go
@@ -1,9 +1,6 @@
package pilorama
-import (
- "cmp"
- "slices"
-)
+import "sort"
// nodeInfo couples parent and metadata.
type nodeInfo struct {
@@ -35,9 +32,9 @@ func newMemoryTree() *memoryTree {
// undo un-does op and changes s in-place.
func (s *memoryTree) undo(op *move) {
if op.HasOld {
- s.infoMap[op.Child] = op.Old
+ s.tree.infoMap[op.Child] = op.Old
} else {
- delete(s.infoMap, op.Child)
+ delete(s.tree.infoMap, op.Child)
}
}
@@ -83,8 +80,8 @@ func (s *memoryTree) do(op *Move) move {
},
}
- shouldPut := !s.isAncestor(op.Child, op.Parent)
- p, ok := s.infoMap[op.Child]
+ shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
+ p, ok := s.tree.infoMap[op.Child]
if ok {
lm.HasOld = true
lm.Old = p
@@ -100,7 +97,7 @@ func (s *memoryTree) do(op *Move) move {
p.Meta = m
p.Parent = op.Parent
- s.infoMap[op.Child] = p
+ s.tree.infoMap[op.Child] = p
return lm
}
@@ -134,10 +131,10 @@ func (t tree) getChildren(parent Node) []Node {
}
}
- slices.SortFunc(children, func(ci, cj uint64) int {
- a := t.infoMap[ci]
- b := t.infoMap[cj]
- return cmp.Compare(a.Meta.Time, b.Meta.Time)
+ sort.Slice(children, func(i, j int) bool {
+ a := t.infoMap[children[i]]
+ b := t.infoMap[children[j]]
+ return a.Meta.Time < b.Meta.Time
})
return children
}
@@ -192,7 +189,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
}
var nodes []Node
- var lastTS Timestamp
+ var lastTs Timestamp
children := t.getChildren(curNode)
for i := range children {
@@ -200,7 +197,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
fileName := string(info.Meta.GetAttr(attr))
if fileName == path[len(path)-1] {
if latest {
- if info.Meta.Time >= lastTS {
+ if info.Meta.Time >= lastTs {
nodes = append(nodes[:0], children[i])
}
} else {
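
The `getChildren` hunk above trades `slices.SortFunc` with `cmp.Compare` (Go 1.21) for `sort.Slice`, which works on older toolchains. A minimal sketch, assuming only the standard library, showing the two forms order values identically:

package main

import (
	"cmp"
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []uint64{3, 1, 2}
	b := []uint64{3, 1, 2}

	// Go 1.21+ generic form removed by the revert.
	slices.SortFunc(a, func(x, y uint64) int { return cmp.Compare(x, y) })

	// sort.Slice form the revert switches back to.
	sort.Slice(b, func(i, j int) bool { return b[i] < b[j] })

	fmt.Println(a, b) // [1 2 3] [1 2 3]
}

One behavioural nuance: neither form is stable, so children with equal Meta.Time may come back in either order under both variants.
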
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index e1f6cd8e7..61a3849bf 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -21,8 +21,6 @@ type Forest interface {
// TreeApply applies replicated operation from another node.
// If background is true, TreeApply will first check whether an operation exists.
TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
- // TreeApplyBatch applies replicated operations from another node.
- TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
@@ -37,7 +35,7 @@ type Forest interface {
TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
// TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error)
+ TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error)
// TreeGetOpLog returns the first log operation stored at or above the height.
// If no such operation is found, an empty Move and a nil error should be returned.
TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
@@ -62,10 +60,10 @@ type Forest interface {
type ForestStorage interface {
// DumpInfo returns information about the pilorama.
DumpInfo() Info
- Init(context.Context) error
+ Init() error
Open(context.Context, mode.Mode) error
- Close(context.Context) error
- SetMode(context.Context, mode.Mode) error
+ Close() error
+ SetMode(m mode.Mode) error
SetParentID(id string)
Forest
@@ -79,38 +77,6 @@ const (
AttributeVersion = "Version"
)
-// Cursor keeps state between function calls for traversing nodes.
-// It stores the attributes associated with a previous call, allowing subsequent operations
-// to resume traversal from this point rather than starting from the beginning.
-type Cursor struct {
- // Last traversed filename.
- filename string
-
- // Last traversed node.
- node Node
-}
-
-func NewCursor(filename string, node Node) *Cursor {
- return &Cursor{
- filename: filename,
- node: node,
- }
-}
-
-func (c *Cursor) GetFilename() string {
- if c == nil {
- return ""
- }
- return c.filename
-}
-
-func (c *Cursor) GetNode() Node {
- if c == nil {
- return Node(0)
- }
- return c.node
-}
-
// CIDDescriptor contains container ID and information about the node position
// in the list of container nodes.
type CIDDescriptor struct {
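
After this hunk the pagination cursor for `TreeSortedByFilename` is again a plain `*string` (the last filename seen) instead of the removed `Cursor` type. A hedged sketch of how a caller might page through a tree with the reverted signature; `listAll` and the batch size are illustrative, and the import paths follow the ones used elsewhere in this diff:

package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// listAll walks every child of the root in filename order, one batch at a time.
func listAll(ctx context.Context, f pilorama.Forest, cnr cidSDK.ID, treeID string) ([]pilorama.MultiNodeInfo, error) {
	const batch = 100
	var (
		out  []pilorama.MultiNodeInfo
		last *string // nil means "start from the beginning"
	)
	for {
		page, next, err := f.TreeSortedByFilename(ctx, cnr, treeID, pilorama.MultiNode{pilorama.RootID}, last, batch)
		if err != nil {
			return nil, err
		}
		out = append(out, page...)
		if len(page) < batch || next == nil {
			return out, nil
		}
		last = next
	}
}

Note the caveat the removed Cursor addressed: with a bare filename cursor, entries that share the last filename of a batch can be skipped on the next call, since the heap drops everything `<=` the cursor.
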
diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go
index 0c042aa56..01d3da9f0 100644
--- a/pkg/local_object_storage/pilorama/mode_test.go
+++ b/pkg/local_object_storage/pilorama/mode_test.go
@@ -19,13 +19,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close(context.Background()))
+ require.NoError(t, f.Close())
require.NoError(t, f.Open(context.Background(), mode.Degraded))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Init())
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close(context.Background()))
+ require.NoError(t, f.Close())
}
diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go
index 36d347f10..106ba6ae9 100644
--- a/pkg/local_object_storage/pilorama/multinode.go
+++ b/pkg/local_object_storage/pilorama/multinode.go
@@ -25,10 +25,6 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool {
return true
}
-func (r *MultiNodeInfo) LastChild() Node {
- return r.Children[len(r.Children)-1]
-}
-
func (n NodeInfo) ToMultiNode() MultiNodeInfo {
return MultiNodeInfo{
Children: MultiNode{n.ID},
diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go
index eecee1527..54c2b90a6 100644
--- a/pkg/local_object_storage/pilorama/split_test.go
+++ b/pkg/local_object_storage/pilorama/split_test.go
@@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) {
require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4"))
require.Equal(t, []byte{10}, testGetByPath(t, "value0"))
- testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) {
+ testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) {
res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize)
require.NoError(t, err)
return res, last
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index b4015ae8d..364649b50 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 {
return r.size
}
-func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
+func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
s.m.RLock()
defer s.m.RUnlock()
@@ -34,15 +34,9 @@ func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (Contai
return ContainerSizeRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ContainerSizeRes{}, err
- }
- defer release()
-
size, err := s.metaBase.ContainerSize(prm.cnr)
if err != nil {
- return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
+ return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
}
return ContainerSizeRes{
@@ -75,15 +69,9 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont
return ContainerCountRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ContainerCountRes{}, err
- }
- defer release()
-
counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
if err != nil {
- return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
+ return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err)
}
return ContainerCountRes{
@@ -112,12 +100,6 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.metaBase.DeleteContainerSize(ctx, id)
}
@@ -140,11 +122,5 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.metaBase.DeleteContainerCount(ctx, id)
}
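
The removed `opsLimiter.ReadRequest`/`WriteRequest` calls acquired a slot before touching the metabase and returned a `release` function. A minimal sketch of that acquire/release shape built on a weighted semaphore; the `limiter` type and its capacity are assumptions for illustration, not the repository's actual QoS implementation:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// limiter mimics the release-func shape of the removed opsLimiter.
type limiter struct {
	sem *semaphore.Weighted
}

func newLimiter(n int64) *limiter {
	return &limiter{sem: semaphore.NewWeighted(n)}
}

// ReadRequest blocks until a slot is free (or ctx is done) and
// returns a function that gives the slot back.
func (l *limiter) ReadRequest(ctx context.Context) (func(), error) {
	if err := l.sem.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { l.sem.Release(1) }, nil
}

func main() {
	l := newLimiter(2)
	release, err := l.ReadRequest(context.Background())
	if err != nil {
		fmt.Println(err)
		return
	}
	defer release()
	// ... perform the metabase read under the concurrency cap ...
}
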
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index d489b8b0d..de881654a 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -20,25 +19,25 @@ import (
"golang.org/x/sync/errgroup"
)
-func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error {
- s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode,
+func (s *Shard) handleMetabaseFailure(stage string, err error) error {
+ s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
- err = s.SetMode(ctx, mode.ReadOnly)
+ err = s.SetMode(mode.ReadOnly)
if err == nil {
return nil
}
- s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode,
+ s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
- err = s.SetMode(ctx, mode.DegradedReadOnly)
+ err = s.SetMode(mode.DegradedReadOnly)
if err != nil {
- return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly)
+ return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly))
}
return nil
}
@@ -72,10 +71,10 @@ func (s *Shard) Open(ctx context.Context) error {
for j := i + 1; j < len(components); j++ {
if err := components[j].Open(ctx, m); err != nil {
// Other components must be opened, fail.
- return fmt.Errorf("open %T: %w", components[j], err)
+ return fmt.Errorf("could not open %T: %w", components[j], err)
}
}
- err = s.handleMetabaseFailure(ctx, "open", err)
+ err = s.handleMetabaseFailure("open", err)
if err != nil {
return err
}
@@ -83,7 +82,7 @@ func (s *Shard) Open(ctx context.Context) error {
break
}
- return fmt.Errorf("open %T: %w", component, err)
+ return fmt.Errorf("could not open %T: %w", component, err)
}
}
return nil
@@ -91,8 +90,8 @@ func (s *Shard) Open(ctx context.Context) error {
type metabaseSynchronizer Shard
-func (x *metabaseSynchronizer) Init(ctx context.Context) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init")
+func (x *metabaseSynchronizer) Init() error {
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
@@ -101,24 +100,26 @@ func (x *metabaseSynchronizer) Init(ctx context.Context) error {
// Init initializes all Shard's components.
func (s *Shard) Init(ctx context.Context) error {
m := s.GetMode()
- if err := s.initializeComponents(ctx, m); err != nil {
+ if err := s.initializeComponents(m); err != nil {
return err
}
s.updateMetrics(ctx)
s.gc = &gc{
- gcCfg: &s.gcCfg,
- remover: s.removeGarbage,
- stopChannel: make(chan struct{}),
- newEpochChan: make(chan uint64),
- newEpochHandlers: &newEpochHandlers{
- cancelFunc: func() {},
- handlers: []newEpochHandler{
- s.collectExpiredLocks,
- s.collectExpiredObjects,
- s.collectExpiredTombstones,
- s.collectExpiredMetrics,
+ gcCfg: &s.gcCfg,
+ remover: s.removeGarbage,
+ stopChannel: make(chan struct{}),
+ eventChan: make(chan Event),
+ mEventHandler: map[eventType]*eventHandlers{
+ eventNewEpoch: {
+ cancelFunc: func() {},
+ handlers: []eventHandler{
+ s.collectExpiredLocks,
+ s.collectExpiredObjects,
+ s.collectExpiredTombstones,
+ s.collectExpiredMetrics,
+ },
},
},
}
@@ -136,9 +137,9 @@ func (s *Shard) Init(ctx context.Context) error {
return nil
}
-func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
+func (s *Shard) initializeComponents(m mode.Mode) error {
type initializer interface {
- Init(context.Context) error
+ Init() error
}
var components []initializer
@@ -168,13 +169,13 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
}
for _, component := range components {
- if err := component.Init(ctx); err != nil {
+ if err := component.Init(); err != nil {
if component == s.metaBase {
if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
}
- err = s.handleMetabaseFailure(ctx, "init", err)
+ err = s.handleMetabaseFailure("init", err)
if err != nil {
return err
}
@@ -182,7 +183,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
break
}
- return fmt.Errorf("initialize %T: %w", component, err)
+ return fmt.Errorf("could not initialize %T: %w", component, err)
}
}
return nil
@@ -203,19 +204,19 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
- return fmt.Errorf("reset metabase: %w", err)
+ return fmt.Errorf("could not reset metabase: %w", err)
}
withCount := true
totalObjects, err := s.blobStor.ObjectsCount(ctx)
if err != nil {
- s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
+ s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
withCount = false
}
eg, egCtx := errgroup.WithContext(ctx)
- if s.refillMetabaseWorkersCount > 0 {
- eg.SetLimit(s.refillMetabaseWorkersCount)
+ if s.cfg.refillMetabaseWorkersCount > 0 {
+ eg.SetLimit(s.cfg.refillMetabaseWorkersCount)
}
var completedCount uint64
@@ -252,12 +253,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err = errors.Join(egErr, itErr)
if err != nil {
- return fmt.Errorf("put objects to the meta: %w", err)
+ return fmt.Errorf("could not put objects to the meta: %w", err)
}
err = s.metaBase.SyncCounters()
if err != nil {
- return fmt.Errorf("sync object counters: %w", err)
+ return fmt.Errorf("could not sync object counters: %w", err)
}
success = true
@@ -268,27 +269,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
+ s.log.Warn(logs.ShardCouldNotUnmarshalObject,
zap.Stringer("address", addr),
- zap.Error(err))
+ zap.String("err", err.Error()))
return nil
}
- hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) > 0
-
- var isIndexedContainer bool
- if hasIndexedAttribute {
- info, err := s.containerInfo.Info(ctx, addr.Container())
- if err != nil {
- return err
- }
- if info.Removed {
- s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
- return nil
- }
- isIndexedContainer = info.Indexed
- }
-
var err error
switch obj.Type() {
case objectSDK.TypeTombstone:
@@ -304,7 +290,6 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
var mPrm meta.PutPrm
mPrm.SetObject(obj)
mPrm.SetStorageID(descriptor)
- mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer)
_, err = s.metaBase.Put(ctx, mPrm)
if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
@@ -316,7 +301,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("unmarshal lock content: %w", err)
+ return fmt.Errorf("could not unmarshal lock content: %w", err)
}
locked := make([]oid.ID, lock.NumberOfMembers())
@@ -326,7 +311,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
id, _ := obj.ID()
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
- return fmt.Errorf("lock objects: %w", err)
+ return fmt.Errorf("could not lock objects: %w", err)
}
return nil
}
@@ -335,7 +320,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("unmarshal tombstone content: %w", err)
+ return fmt.Errorf("could not unmarshal tombstone content: %w", err)
}
tombAddr := object.AddressOf(obj)
@@ -356,18 +341,17 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- return fmt.Errorf("inhume objects: %w", err)
+ return fmt.Errorf("could not inhume objects: %w", err)
}
return nil
}
// Close releases all Shard's components.
-func (s *Shard) Close(ctx context.Context) error {
- unlock := s.lockExclusive()
+func (s *Shard) Close() error {
if s.rb != nil {
- s.rb.Stop(ctx, s.log)
+ s.rb.Stop(s.log)
}
- var components []interface{ Close(context.Context) error }
+ components := []interface{ Close() error }{}
if s.pilorama != nil {
components = append(components, s.pilorama)
@@ -383,23 +367,15 @@ func (s *Shard) Close(ctx context.Context) error {
var lastErr error
for _, component := range components {
- if err := component.Close(ctx); err != nil {
+ if err := component.Close(); err != nil {
lastErr = err
- s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err))
+ s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
- if s.opsLimiter != nil {
- s.opsLimiter.Close()
- }
-
- unlock()
-
- // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock.
- // So to prevent deadlock GC stopping is outside of exclusive lock.
// If Init/Open was unsuccessful gc can be nil.
if s.gc != nil {
- s.gc.stop(ctx)
+ s.gc.stop()
}
return lastErr
@@ -421,18 +397,18 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
unlock := s.lockExclusive()
defer unlock()
- s.rb.Stop(ctx, s.log)
+ s.rb.Stop(s.log)
if !s.info.Mode.NoMetabase() {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
}
- ok, err := s.metaBase.Reload(ctx, c.metaOpts...)
+ ok, err := s.metaBase.Reload(c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
- s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
- _ = s.setMode(ctx, mode.DegradedReadOnly)
+ s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
+ _ = s.setMode(mode.DegradedReadOnly)
}
return err
}
@@ -444,19 +420,15 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
// config after the node was updated.
err = s.refillMetabase(ctx)
} else {
- err = s.metaBase.Init(ctx)
+ err = s.metaBase.Init()
}
if err != nil {
- s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
- _ = s.setMode(ctx, mode.DegradedReadOnly)
+ s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
+ _ = s.setMode(mode.DegradedReadOnly)
return err
}
}
- if c.opsLimiter != nil {
- s.opsLimiter.Close()
- s.opsLimiter = c.opsLimiter
- }
- return s.setMode(ctx, c.info.Mode)
+ return s.setMode(c.info.Mode)
}
func (s *Shard) lockExclusive() func() {
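
`initializeComponents` above gathers the shard's parts behind a tiny `initializer` interface, which the revert narrows to a context-free `Init() error`. A self-contained sketch of the pattern with stand-in components (`metabase` and `blobstor` here are placeholders, not the real types):

package main

import "fmt"

// initializer is the minimal behaviour the init loop relies on
// after the revert: components initialize without a context.
type initializer interface {
	Init() error
}

type metabase struct{}

func (metabase) Init() error { return nil }

type blobstor struct{}

func (blobstor) Init() error { return nil }

func initAll(components []initializer) error {
	for _, c := range components {
		if err := c.Init(); err != nil {
			// %T names the failing component, as in the shard code above.
			return fmt.Errorf("could not initialize %T: %w", c, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(initAll([]initializer{metabase{}, blobstor{}})) // <nil>
}
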
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 6d2cd7137..44fee1636 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -86,7 +86,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadWrite, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
// Metabase can be opened in read-only => start in ReadOnly mode.
allowedMode.Store(int64(os.O_RDONLY))
@@ -95,9 +95,9 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite))
+ require.Error(t, sh.SetMode(mode.ReadWrite))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
// Metabase is corrupted => start in DegradedReadOnly mode.
allowedMode.Store(math.MaxInt64)
@@ -106,7 +106,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.DegradedReadOnly, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}
func TestRefillMetabaseCorrupted(t *testing.T) {
@@ -126,7 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}),
}
- mm := newMetricStore()
+ mm := NewMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
@@ -146,7 +146,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
putPrm.SetObject(obj)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
addr := object.AddressOf(obj)
// This is copied from `fstree.treePath()` to avoid exporting function just for tests.
@@ -170,7 +170,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
getPrm.SetAddress(addr)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}
func TestRefillMetabase(t *testing.T) {
@@ -190,7 +190,7 @@ func TestRefillMetabase(t *testing.T) {
}),
}
- mm := newMetricStore()
+ mm := NewMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
@@ -216,7 +216,7 @@ func TestRefillMetabase(t *testing.T) {
locked := make([]oid.ID, 1, 2)
locked[0] = oidtest.ID()
cnrLocked := cidtest.ID()
- for range objNum {
+ for i := uint64(0); i < objNum; i++ {
obj := objecttest.Object()
obj.SetType(objectSDK.TypeRegular)
@@ -358,7 +358,7 @@ func TestRefillMetabase(t *testing.T) {
phyBefore := c.Phy
logicalBefore := c.Logic
- err = sh.Close(context.Background())
+ err = sh.Close()
require.NoError(t, err)
sh = New(
@@ -379,7 +379,7 @@ func TestRefillMetabase(t *testing.T) {
// initialize Blobstor
require.NoError(t, sh.Init(context.Background()))
- defer sh.Close(context.Background())
+ defer sh.Close()
checkAllObjs(false)
checkObj(object.AddressOf(tombObj), nil)
diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go
index 8dc1f0522..b3bc6a30b 100644
--- a/pkg/local_object_storage/shard/count.go
+++ b/pkg/local_object_storage/shard/count.go
@@ -23,12 +23,6 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) {
return 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
-
cc, err := s.metaBase.ObjectCounters()
if err != nil {
return 0, err
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index 0101817a8..c898fdf41 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -54,12 +55,6 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del
return DeleteRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return DeleteRes{}, err
- }
- defer release()
-
result := DeleteRes{}
for _, addr := range prm.addr {
select {
@@ -100,7 +95,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr
}
_, err := s.writeCache.Head(ctx, addr)
if err == nil {
- s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
+ s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
return fmt.Errorf("object %s must be flushed from writecache", addr)
}
if client.IsErrObjectNotFound(err) {
@@ -115,9 +110,10 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
- s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
+ s.log.Debug(logs.StorageIDRetrievalFailure,
zap.Stringer("object", addr),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}
storageID := res.StorageID()
@@ -134,9 +130,10 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
_, err = s.blobStor.Delete(ctx, delPrm)
if err != nil && !client.IsErrObjectNotFound(err) {
- s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
+ s.log.Debug(logs.ObjectRemovalFailureBlobStor,
zap.Stringer("object_address", addr),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}
return nil
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index c9ce93bc5..9f205fa5d 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -3,6 +3,7 @@ package shard
import (
"context"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
@@ -37,7 +38,7 @@ func TestShard_Delete_BigObject(t *testing.T) {
func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
@@ -57,14 +58,19 @@ func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = sh.Get(context.Background(), getPrm)
+ _, err = testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
if hasWriteCache {
- require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}))
+ sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false})
+ require.Eventually(t, func() bool {
+ _, err = sh.Delete(context.Background(), delPrm)
+ return err == nil
+ }, 30*time.Second, 10*time.Millisecond)
+ } else {
+ _, err = sh.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
}
- _, err = sh.Delete(context.Background(), delPrm)
- require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
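
The delete test above stops assuming the writecache flush is synchronous and instead polls with testify's `require.Eventually`. A self-contained sketch of that polling idiom, assuming only testify:

package main

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallyPattern(t *testing.T) {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond) // stands in for the background flush
		close(done)
	}()

	// Re-check every 10ms for up to 30s instead of asserting once.
	require.Eventually(t, func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	}, 30*time.Second, 10*time.Millisecond)
}
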
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 2c11b6b01..784bf293a 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -18,7 +18,7 @@ type ExistsPrm struct {
// Exists option to set object checked for existence.
Address oid.Address
// Exists option to set parent object checked for existence.
- ECParentAddress oid.Address
+ ParentAddress oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
@@ -53,6 +53,10 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
))
defer span.End()
+ var exists bool
+ var locked bool
+ var err error
+
s.m.RLock()
defer s.m.RUnlock()
@@ -60,18 +64,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
return ExistsRes{}, ErrShardDisabled
} else if s.info.EvacuationInProgress {
return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ExistsRes{}, err
- }
- defer release()
-
- var exists bool
- var locked bool
-
- if s.info.Mode.NoMetabase() {
+ } else if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
p.Address = prm.Address
@@ -81,7 +74,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
} else {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(prm.Address)
- existsPrm.SetECParent(prm.ECParentAddress)
+ existsPrm.SetParent(prm.ParentAddress)
var res meta.ExistsRes
res, err = s.metaBase.Exists(ctx, existsPrm)
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index a262a52cb..d605746e8 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -6,13 +6,11 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -33,14 +31,41 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
-type newEpochHandler func(context.Context, uint64)
+// Event represents class of external events.
+type Event interface {
+ typ() eventType
+}
-type newEpochHandlers struct {
+type eventType int
+
+const (
+ _ eventType = iota
+ eventNewEpoch
+)
+
+type newEpoch struct {
+ epoch uint64
+}
+
+func (e newEpoch) typ() eventType {
+ return eventNewEpoch
+}
+
+// EventNewEpoch returns new epoch event.
+func EventNewEpoch(e uint64) Event {
+ return newEpoch{
+ epoch: e,
+ }
+}
+
+type eventHandler func(context.Context, Event)
+
+type eventHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
- handlers []newEpochHandler
+ handlers []eventHandler
}
type gcRunResult struct {
@@ -82,10 +107,10 @@ type gc struct {
remover func(context.Context) gcRunResult
- // newEpochChan is used only for listening for the new epoch event.
+ // eventChan is used only for listening for the new epoch event.
// It is ok to keep it open; we listen for context cancellation when writing to it.
- newEpochChan chan uint64
- newEpochHandlers *newEpochHandlers
+ eventChan chan Event
+ mEventHandler map[eventType]*eventHandlers
}
type gcCfg struct {
@@ -106,7 +131,7 @@ type gcCfg struct {
func defaultGCCfg() gcCfg {
return gcCfg{
removerInterval: 10 * time.Second,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
workerPoolInit: func(int) util.WorkerPool {
return nil
},
@@ -115,8 +140,16 @@ func defaultGCCfg() gcCfg {
}
func (gc *gc) init(ctx context.Context) {
- gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
+ sz := 0
+
+ for _, v := range gc.mEventHandler {
+ sz += len(v.handlers)
+ }
+
+ if sz > 0 {
+ gc.workerPool = gc.workerPoolInit(sz)
+ }
+
gc.wg.Add(2)
go gc.tickRemover(ctx)
go gc.listenEvents(ctx)
@@ -128,14 +161,14 @@ func (gc *gc) listenEvents(ctx context.Context) {
for {
select {
case <-gc.stopChannel:
- gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
+ gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel)
return
case <-ctx.Done():
- gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
+ gc.log.Warn(logs.ShardStopEventListenerByContext)
return
- case event, ok := <-gc.newEpochChan:
+ case event, ok := <-gc.eventChan:
if !ok {
- gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
+ gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel)
return
}
@@ -144,38 +177,43 @@ func (gc *gc) listenEvents(ctx context.Context) {
}
}
-func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
- gc.newEpochHandlers.cancelFunc()
- gc.newEpochHandlers.prevGroup.Wait()
+func (gc *gc) handleEvent(ctx context.Context, event Event) {
+ v, ok := gc.mEventHandler[event.typ()]
+ if !ok {
+ return
+ }
+
+ v.cancelFunc()
+ v.prevGroup.Wait()
var runCtx context.Context
- runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
+ runCtx, v.cancelFunc = context.WithCancel(ctx)
- gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
+ v.prevGroup.Add(len(v.handlers))
- for i := range gc.newEpochHandlers.handlers {
+ for i := range v.handlers {
select {
case <-ctx.Done():
return
default:
}
- h := gc.newEpochHandlers.handlers[i]
+ h := v.handlers[i]
err := gc.workerPool.Submit(func() {
- defer gc.newEpochHandlers.prevGroup.Done()
- h(runCtx, epoch)
+ defer v.prevGroup.Done()
+ h(runCtx, event)
})
if err != nil {
- gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
- zap.Error(err),
+ gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
+ zap.String("error", err.Error()),
)
- gc.newEpochHandlers.prevGroup.Done()
+ v.prevGroup.Done()
}
}
}
-func (gc *gc) releaseResources(ctx context.Context) {
+func (gc *gc) releaseResources() {
if gc.workerPool != nil {
gc.workerPool.Release()
}
@@ -184,7 +222,7 @@ func (gc *gc) releaseResources(ctx context.Context) {
// because it is possible that we close it earlier than we stop writing.
// It is ok to keep it open.
- gc.log.Debug(ctx, logs.ShardGCIsStopped)
+ gc.log.Debug(logs.ShardGCIsStopped)
}
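
handleEvent above serializes handler batches per event type: an incoming event cancels the previous batch's context, waits for its WaitGroup to drain, and only then starts the handlers on a fresh child context. A reduced sketch of that cancel-then-wait pattern, with plain goroutines standing in for the worker pool (illustrative names, not shard API):

package main

import (
	"context"
	"sync"
)

type batch struct {
	prevGroup  sync.WaitGroup
	cancelFunc context.CancelFunc
}

func (b *batch) run(ctx context.Context, jobs []func(context.Context)) {
	b.cancelFunc()     // stop the previous batch, if it is still running
	b.prevGroup.Wait() // and wait until it has fully drained

	var runCtx context.Context
	runCtx, b.cancelFunc = context.WithCancel(ctx)

	b.prevGroup.Add(len(jobs))
	for _, job := range jobs {
		go func(run func(context.Context)) {
			defer b.prevGroup.Done()
			run(runCtx)
		}(job)
	}
}

func main() {
	b := &batch{cancelFunc: func() {}} // no-op cancel before the first run
	b.run(context.Background(), []func(context.Context){
		func(ctx context.Context) { <-ctx.Done() }, // blocks until canceled
	})
	b.run(context.Background(), nil) // cancels and drains the first batch
}
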
func (gc *gc) tickRemover(ctx context.Context) {
@@ -198,10 +236,10 @@ func (gc *gc) tickRemover(ctx context.Context) {
case <-ctx.Done():
// Context canceled earlier than we start to close shards.
// It makes sense to stop collecting garbage by context too.
- gc.releaseResources(ctx)
+ gc.releaseResources()
return
case <-gc.stopChannel:
- gc.releaseResources(ctx)
+ gc.releaseResources()
return
case <-timer.C:
startedAt := time.Now()
@@ -220,16 +258,13 @@ func (gc *gc) tickRemover(ctx context.Context) {
}
}
-func (gc *gc) stop(ctx context.Context) {
+func (gc *gc) stop() {
gc.onceStop.Do(func() {
close(gc.stopChannel)
})
- gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
+ gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
-
- gc.newEpochHandlers.cancelFunc()
- gc.newEpochHandlers.prevGroup.Wait()
}
// iterates over metabase and deletes objects
@@ -251,47 +286,8 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
return
}
- s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
- defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
-
- buf, err := s.getGarbage(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
- zap.Error(err),
- )
-
- return
- } else if len(buf) == 0 {
- result.success = true
- return
- }
-
- var deletePrm DeletePrm
- deletePrm.SetAddresses(buf...)
-
- // delete accumulated objects
- res, err := s.delete(ctx, deletePrm, true)
-
- result.deleted = res.deleted
- result.failedToDelete = uint64(len(buf)) - res.deleted
- result.success = true
-
- if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
- zap.Error(err),
- )
- result.success = false
- }
-
- return
-}
-
-func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
+ s.log.Debug(logs.ShardGCRemoveGarbageStarted)
+ defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted)
buf := make([]oid.Address, 0, s.rmBatchSize)
@@ -312,20 +308,47 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
return nil
})
- if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil {
- return nil, err
+ // iterate over metabase's objects with GC mark
+ // (no more than s.rmBatchSize objects)
+ err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
+ if err != nil {
+ s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ } else if len(buf) == 0 {
+ result.success = true
+ return
}
- return buf, nil
-}
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(buf...)
+
+ // delete accumulated objects
+ res, err := s.delete(ctx, deletePrm, true)
+
+ result.deleted = res.deleted
+ result.failedToDelete = uint64(len(buf)) - res.deleted
+ result.success = true
+
+ if err != nil {
+ s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
+ zap.String("error", err.Error()),
+ )
+ result.success = false
+ }
-func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
- workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
- batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
return
}
-func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
+func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
+ workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
+ batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
+ return
+}
+
+func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -333,8 +356,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
}()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -343,7 +366,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
batch = append(batch, o.Address())
@@ -373,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
+ s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
}
}
@@ -391,25 +414,24 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
return
}
- s.handleExpiredObjectsUnsafe(ctx, expired)
-}
-
-func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) {
- select {
- case <-ctx.Done():
- return
- default:
- }
-
expired, err := s.getExpiredWithLinked(ctx, expired)
if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
+ s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
return
}
- res, err := s.inhumeGC(ctx, expired)
+ var inhumePrm meta.InhumePrm
+
+ inhumePrm.SetAddresses(expired...)
+ inhumePrm.SetGCMark()
+
+ // inhume the collected objects
+ res, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err))
+ s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
+ zap.String("error", err.Error()),
+ )
+
return
}
@@ -427,12 +449,6 @@ func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Ad
}
func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
-
result := make([]oid.Address, 0, len(source))
parentToChildren, err := s.metaBase.GetChildren(ctx, source)
if err != nil {
@@ -446,20 +462,7 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address)
return result, nil
}
-func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) {
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return meta.InhumeRes{}, err
- }
- defer release()
-
- var inhumePrm meta.InhumePrm
- inhumePrm.SetAddresses(addrs...)
- inhumePrm.SetGCMark()
- return s.metaBase.Inhume(ctx, inhumePrm)
-}
-
-func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -467,10 +470,11 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
}()
+ epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
- log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
- defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)
+ log.Debug(logs.ShardStartedExpiredTombstonesHandling)
+ defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -488,29 +492,22 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
})
for {
- log.Debug(ctx, logs.ShardIteratingTombstones)
+ log.Debug(logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
- s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
+ s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
}
- var release qos.ReleaseFunc
- release, err = s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
- s.m.RUnlock()
- return
- }
err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
- release()
if err != nil {
- log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
+
return
}
@@ -527,7 +524,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
- log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+ log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
if len(tssExp) > 0 {
s.expiredTombstonesCallback(ctx, tssExp)
}
@@ -538,7 +535,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
-func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -546,8 +543,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
}()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -557,14 +554,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() == objectSDK.TypeLock {
batch = append(batch, o.Address())
if len(batch) == batchSize {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, epoch, expired)
+ s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
batch = make([]oid.Address, 0, batchSize)
@@ -578,7 +575,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
if len(batch) > 0 {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, epoch, expired)
+ s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
}
@@ -587,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
+ s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
}
}
@@ -599,13 +596,7 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
return ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
+ err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
select {
case <-ctx.Done():
return meta.ErrInterruptIterator
@@ -621,11 +612,12 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
}
func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
}
- defer release()
return s.metaBase.FilterExpired(ctx, epoch, addresses)
}
@@ -635,22 +627,28 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
//
// Does not modify tss.
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
+ if s.GetMode().NoMetabase() {
return
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
- return
+ // Mark tombstones as garbage.
+ var pInhume meta.InhumePrm
+
+ tsAddrs := make([]oid.Address, 0, len(tss))
+ for _, ts := range tss {
+ tsAddrs = append(tsAddrs, ts.Tombstone())
}
- res, err := s.metaBase.InhumeTombstones(ctx, tss)
- release()
+
+ pInhume.SetGCMark()
+ pInhume.SetAddresses(tsAddrs...)
+
+ // inhume tombstones
+ res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
+ s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
+ zap.String("error", err.Error()),
+ )
+
return
}
@@ -665,27 +663,26 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size))
i++
}
+
+ // drop the just-processed expired tombstones
+ // from the graveyard
+ err = s.metaBase.DropGraves(ctx, tss)
+ if err != nil {
+ s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
+ }
}
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return
- }
-
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ if s.GetMode().NoMetabase() {
return
}
unlocked, err := s.metaBase.FreeLockedBy(lockers)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
+ zap.String("error", err.Error()),
+ )
return
}
@@ -693,15 +690,13 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
var pInhume meta.InhumePrm
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
- release, err = s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
- return
- }
+
res, err := s.metaBase.Inhume(ctx, pInhume)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
+ zap.String("error", err.Error()),
+ )
+
return
}
@@ -723,7 +718,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
return
}
@@ -731,57 +726,47 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
return
}
- s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked)
+ s.handleExpiredObjects(ctx, expiredUnlocked)
}
// HandleDeletedLocks unlocks all objects which were locked by lockers.
-func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
+func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
+ if s.GetMode().NoMetabase() {
return
}
- release, err := s.opsLimiter.WriteRequest(ctx)
+ _, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
- return
- }
- _, err = s.metaBase.FreeLockedBy(lockers)
- release()
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
+ zap.String("error", err.Error()),
+ )
+
return
}
}
-// NotificationChannel returns channel for new epoch events.
-func (s *Shard) NotificationChannel() chan<- uint64 {
- return s.gc.newEpochChan
+// NotificationChannel returns a channel for shard events.
+func (s *Shard) NotificationChannel() chan<- Event {
+ return s.gc.eventChan
}
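
Because the channel is now typed chan<- Event, producers wrap the epoch number instead of sending it raw. A hedged usage sketch, assuming this diff's state of the shard package; the select on ctx is an illustrative guard, since GC stops reading the channel on shutdown and a bare send could block:

package notify

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
)

// notifyNewEpoch delivers a new-epoch event to a shard's GC listener.
func notifyNewEpoch(ctx context.Context, sh *shard.Shard, epoch uint64) {
	select {
	case sh.NotificationChannel() <- shard.EventNewEpoch(epoch):
	case <-ctx.Done():
		// shutting down: do not block on a channel nobody reads anymore
	}
}
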
-func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
defer span.End()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
+ epoch := e.(newEpoch).epoch
+
+ s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
s.collectExpiredContainerSizeMetrics(ctx, epoch)
s.collectExpiredContainerCountMetrics(ctx, epoch)
}
func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
ids, err := s.metaBase.ZeroSizeContainers(ctx)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
@@ -791,15 +776,9 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui
}
func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
ids, err := s.metaBase.ZeroCountContainers(ctx)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
return
}
if len(ids) == 0 {
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 54d2f1510..3993593ad 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -37,8 +37,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -62,8 +61,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
meta.WithEpochState(epochState{}),
),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
@@ -74,13 +73,13 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
return pool
}),
WithGCRemoverSleepInterval(1 * time.Second),
- WithDisabledGC(),
}
sh = New(opts...)
+ sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index f512a488a..90958cd35 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -5,13 +5,13 @@ import (
"errors"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -34,7 +34,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
@@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
require.NoError(t, err)
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), epoch.Value)
+ sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
@@ -131,7 +131,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
@@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), epoch.Value)
+ sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")
@@ -190,7 +190,7 @@ func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
obj := testutil.GenerateObjectWithSize(1024)
@@ -254,7 +254,7 @@ func TestGCDontDeleteObjectFromWritecache(t *testing.T) {
additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
obj := testutil.GenerateObjectWithSize(1024)
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 28f8912be..d1c393613 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -10,6 +10,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -111,12 +112,6 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return c.Get(ctx, prm.addr)
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return GetRes{}, err
- }
- defer release()
-
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
@@ -149,7 +144,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
} else {
- s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
+ s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@@ -158,14 +153,16 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return res, false, err
}
if client.IsErrObjectNotFound(err) {
- s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
+ s.log.Debug(logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta))
+ zap.Bool("skip_meta", skipMeta),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
} else {
- s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
+ s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta))
+ zap.Bool("skip_meta", skipMeta),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
if skipMeta || mErr != nil {
@@ -178,7 +175,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
- return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
+ return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
}
storageID := mExRes.StorageID()
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 837991b73..8a7c6972d 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -5,9 +5,11 @@ import (
"context"
"errors"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -30,7 +32,7 @@ func TestShard_Get(t *testing.T) {
func testShardGet(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
var putPrm PutPrm
var getPrm GetPrm
@@ -47,7 +49,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := sh.Get(context.Background(), getPrm)
+ res, err := testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -65,7 +67,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := sh.Get(context.Background(), getPrm)
+ res, err := testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -93,13 +95,13 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(child))
- res, err := sh.Get(context.Background(), getPrm)
+ res, err := testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
require.True(t, binaryEqual(child, res.Object()))
getPrm.SetAddress(object.AddressOf(parent))
- _, err = sh.Get(context.Background(), getPrm)
+ _, err = testGet(t, sh, getPrm, hasWriteCache)
var si *objectSDK.SplitInfoError
require.True(t, errors.As(err, &si))
@@ -113,6 +115,19 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
})
}
+func testGet(t *testing.T, sh *Shard, getPrm GetPrm, hasWriteCache bool) (GetRes, error) {
+ res, err := sh.Get(context.Background(), getPrm)
+ if hasWriteCache {
+ require.Eventually(t, func() bool {
+ if client.IsErrObjectNotFound(err) {
+ res, err = sh.Get(context.Background(), getPrm)
+ }
+ return !client.IsErrObjectNotFound(err)
+ }, time.Second, time.Millisecond*100)
+ }
+ return res, err
+}
+
// binaryEqual is used when an object contains empty lists in its structure and
// require.Equal fails on comparing <nil> and []{} lists.
func binaryEqual(a, b *objectSDK.Object) bool {
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 34b8290d6..ff57e3bf9 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -81,12 +81,6 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
- release, limitErr := s.opsLimiter.ReadRequest(ctx)
- if limitErr != nil {
- return HeadRes{}, limitErr
- }
- defer release()
-
var res meta.GetRes
res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index deb3019df..1f4631993 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -4,9 +4,11 @@ import (
"context"
"errors"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
@@ -28,7 +30,7 @@ func TestShard_Head(t *testing.T) {
func testShardHead(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
var putPrm PutPrm
var headPrm HeadPrm
@@ -44,7 +46,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
headPrm.SetAddress(object.AddressOf(obj))
- res, err := sh.Head(context.Background(), headPrm)
+ res, err := testHead(t, sh, headPrm, hasWriteCache)
require.NoError(t, err)
require.Equal(t, obj.CutPayload(), res.Object())
})
@@ -72,7 +74,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
var siErr *objectSDK.SplitInfoError
- _, err = sh.Head(context.Background(), headPrm)
+ _, err = testHead(t, sh, headPrm, hasWriteCache)
require.True(t, errors.As(err, &siErr))
headPrm.SetAddress(object.AddressOf(parent))
@@ -83,3 +85,16 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
require.Equal(t, parent.CutPayload(), head.Object())
})
}
+
+func testHead(t *testing.T, sh *Shard, headPrm HeadPrm, hasWriteCache bool) (HeadRes, error) {
+ res, err := sh.Head(context.Background(), headPrm)
+ if hasWriteCache {
+ require.Eventually(t, func() bool {
+ if client.IsErrObjectNotFound(err) {
+ res, err = sh.Head(context.Background(), headPrm)
+ }
+ return !client.IsErrObjectNotFound(err)
+ }, time.Second, time.Millisecond*100)
+ }
+ return res, err
+}
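
testGet and testHead above mask the same race: with a write cache attached, an object can be mid-flush and transiently report not-found, so reads are retried until the error changes or a deadline passes. Their shared retry skeleton, reduced to its core (illustrative, not shard API):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotFound = errors.New("object not found")

// retryWhileNotFound re-runs op while it keeps reporting not-found,
// giving the write cache time to finish flushing.
func retryWhileNotFound(op func() error, timeout, tick time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := op()
		if !errors.Is(err, errNotFound) {
			return err // success, or a different (permanent) error
		}
		if time.Now().After(deadline) {
			return err // still not visible: give up, as Eventually would
		}
		time.Sleep(tick)
	}
}

func main() {
	calls := 0
	err := retryWhileNotFound(func() error {
		calls++
		if calls < 3 {
			return errNotFound // object still sits in the write cache
		}
		return nil
	}, time.Second, 10*time.Millisecond)
	fmt.Println(err, calls) // <nil> 3
}
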
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 7391adef2..2fe68d270 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -1,11 +1,11 @@
package shard
import (
- "context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/mr-tron/base58"
"go.uber.org/zap"
)
@@ -31,12 +31,12 @@ func (s *Shard) ID() *ID {
}
// UpdateID reads shard ID saved in the metabase and updates it if it is missing.
-func (s *Shard) UpdateID(ctx context.Context) (err error) {
+func (s *Shard) UpdateID() (err error) {
var idFromMetabase []byte
modeDegraded := s.GetMode().NoMetabase()
if !modeDegraded {
- if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
- err = fmt.Errorf("read shard id from metabase: %w", err)
+ if idFromMetabase, err = s.metaBase.GetShardID(mode.ReadOnly); err != nil {
+ err = fmt.Errorf("failed to read shard id from metabase: %w", err)
}
}
@@ -45,12 +45,14 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
}
shardID := s.info.ID.String()
- s.metricsWriter.SetShardID(shardID)
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.SetShardID(shardID)
+ }
if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
s.writeCache.GetMetrics().SetShardID(shardID)
}
- s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
+ s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))}
s.metaBase.SetLogger(s.log)
s.blobStor.SetLogger(s.log)
if s.hasWriteCache() {
@@ -61,11 +63,10 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
if s.pilorama != nil {
s.pilorama.SetParentID(s.info.ID.String())
}
- s.opsLimiter.SetParentID(s.info.ID.String())
if len(idFromMetabase) == 0 && !modeDegraded {
- if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
- err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
+ if setErr := s.metaBase.SetShardID(*s.info.ID, s.GetMode()); setErr != nil {
+ err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr))
}
}
return
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index c0fd65f4b..746177c3a 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -81,12 +82,6 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return InhumeRes{}, err
- }
- defer release()
-
if s.hasWriteCache() {
for i := range prm.target {
_ = s.writeCache.Delete(ctx, prm.target[i])
@@ -114,8 +109,9 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrLockObjectRemoval
}
- s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
- zap.Error(err),
+ s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
s.m.RUnlock()
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 1421f0e18..82754568f 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -27,7 +27,7 @@ func TestShard_Inhume(t *testing.T) {
func testShardInhume(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
@@ -48,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = sh.Get(context.Background(), getPrm)
+ _, err = testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
_, err = sh.Inhume(context.Background(), inhPrm)
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index af87981ca..08ea81a0c 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -33,30 +34,6 @@ func (r ListContainersRes) Containers() []cid.ID {
return r.containers
}
-// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
-type IterateOverContainersPrm struct {
- // Handler function executed upon containers in db.
- Handler func(context.Context, objectSDK.Type, cid.ID) error
-}
-
-// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
-type IterateOverObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
- // Handler function executed upon objects in db.
- Handler func(context.Context, *objectcore.Info) error
-}
-
-// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
-type CountAliveObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
-}
-
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -106,15 +83,9 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
return SelectRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return SelectRes{}, err
- }
- defer release()
-
lst, err := s.metaBase.Containers(ctx)
if err != nil {
- return res, fmt.Errorf("list stored containers: %w", err)
+ return res, fmt.Errorf("can't list stored containers: %w", err)
}
filters := objectSDK.NewSearchFilters()
@@ -127,9 +98,10 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
if err != nil {
- s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
+ s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}
@@ -151,15 +123,9 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
return ListContainersRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ListContainersRes{}, err
- }
- defer release()
-
containers, err := s.metaBase.Containers(ctx)
if err != nil {
- return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
+ return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
}
return ListContainersRes{
@@ -185,18 +151,12 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
return ListWithCursorRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ListWithCursorRes{}, err
- }
- defer release()
-
var metaPrm meta.ListPrm
metaPrm.SetCount(prm.count)
metaPrm.SetCursor(prm.cursor)
res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
if err != nil {
- return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
+ return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
}
return ListWithCursorRes{
@@ -204,96 +164,3 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
cursor: res.Cursor(),
}, nil
}
-
-// IterateOverContainers lists physical containers presented in shard.
-func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
- _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- var metaPrm meta.IterateOverContainersPrm
- metaPrm.Handler = prm.Handler
- err = s.metaBase.IterateOverContainers(ctx, metaPrm)
- if err != nil {
- return fmt.Errorf("iterate over containers: %w", err)
- }
-
- return nil
-}
-
-// IterateOverObjectsInContainer lists physical objects presented in shard for provided container's bucket name.
-func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
- _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- var metaPrm meta.IterateOverObjectsInContainerPrm
- metaPrm.ContainerID = prm.ContainerID
- metaPrm.ObjectType = prm.ObjectType
- metaPrm.Handler = prm.Handler
- err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
- if err != nil {
- return fmt.Errorf("iterate over objects: %w", err)
- }
-
- return nil
-}
-
-// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage.
-func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
- _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket")
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return 0, ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
-
- var metaPrm meta.CountAliveObjectsInContainerPrm
- metaPrm.ObjectType = prm.ObjectType
- metaPrm.ContainerID = prm.ContainerID
- count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
- if err != nil {
- return 0, fmt.Errorf("count alive objects in bucket: %w", err)
- }
-
- return count, nil
-}
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 139b2e316..3414dc76a 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -18,14 +18,14 @@ func TestShard_List(t *testing.T) {
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
testShardList(t, sh)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
shWC := newShard(t, true)
- defer func() { require.NoError(t, shWC.Close(context.Background())) }()
+ defer func() { require.NoError(t, shWC.Close()) }()
testShardList(t, shWC)
})
}
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index 9c392fdac..4a8d89d63 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -38,13 +38,7 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- err = s.metaBase.Lock(ctx, idCnr, locker, locked)
+ err := s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -67,12 +61,6 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return false, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
- defer release()
-
var prm meta.IsLockedPrm
prm.SetAddress(addr)
@@ -84,10 +72,10 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return res.Locked(), nil
}
-// GetLocks return lock id's of the provided object. Not found object is
+// GetLocked returns lock IDs of the provided object. Not found object is
// considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
-func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks",
+func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocked",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", addr.EncodeToString()),
@@ -98,12 +86,5 @@ func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error
if m.NoMetabase() {
return nil, ErrDegradedMode
}
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
-
- return s.metaBase.GetLocks(ctx, addr)
+ return s.metaBase.GetLocked(ctx, addr)
}
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 3878a65cd..9ce95feb1 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -28,10 +28,9 @@ func TestShard_Lock(t *testing.T) {
var sh *Shard
rootPath := t.TempDir()
- l := logger.NewLoggerWrapper(zap.NewNop())
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
- WithLogger(l),
+ WithLogger(&logger.Logger{Logger: zap.NewNop()}),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -54,8 +53,8 @@ func TestShard_Lock(t *testing.T) {
meta.WithPath(filepath.Join(rootPath, "meta")),
meta.WithEpochState(epochState{}),
),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
}
@@ -63,7 +62,7 @@ func TestShard_Lock(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
@@ -149,7 +148,7 @@ func TestShard_Lock(t *testing.T) {
func TestShard_IsLocked(t *testing.T) {
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
deleted file mode 100644
index 087ba42ef..000000000
--- a/pkg/local_object_storage/shard/metrics.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package shard
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-
-// MetricsWriter is an interface that must store shard's metrics.
-type MetricsWriter interface {
- // SetObjectCounter must set object counter taking into account object type.
- SetObjectCounter(objectType string, v uint64)
- // AddToObjectCounter must update object counter taking into account object
- // type.
- // Negative parameter must decrease the counter.
- AddToObjectCounter(objectType string, delta int)
- // AddToContainerSize must add a value to the container size.
- // Value can be negative.
- AddToContainerSize(cnr string, value int64)
- // AddToPayloadSize must add a value to the payload size.
- // Value can be negative.
- AddToPayloadSize(value int64)
- // IncObjectCounter must increment shard's object counter taking into account
- // object type.
- IncObjectCounter(objectType string)
- // SetShardID must set (update) the shard identifier that will be used in
- // metrics.
- SetShardID(id string)
- // SetMode set mode of shard.
- SetMode(mode mode.Mode)
- // SetContainerObjectsCount sets container object count.
- SetContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncContainerObjectsCount increments container object count.
- IncContainerObjectsCount(cnrID string, objectType string)
- // SubContainerObjectsCount subtracts container object count.
- SubContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncRefillObjectsCount increments refill objects count.
- IncRefillObjectsCount(path string, size int, success bool)
- // SetRefillPercent sets refill percent.
- SetRefillPercent(path string, percent uint32)
- // SetRefillStatus sets refill status.
- SetRefillStatus(path string, status string)
- // SetEvacuationInProgress sets evacuation status
- SetEvacuationInProgress(value bool)
-}
-
-type noopMetrics struct{}
-
-var _ MetricsWriter = noopMetrics{}
-
-func (noopMetrics) SetObjectCounter(string, uint64) {}
-func (noopMetrics) AddToObjectCounter(string, int) {}
-func (noopMetrics) AddToContainerSize(string, int64) {}
-func (noopMetrics) AddToPayloadSize(int64) {}
-func (noopMetrics) IncObjectCounter(string) {}
-func (noopMetrics) SetShardID(string) {}
-func (noopMetrics) SetMode(mode.Mode) {}
-func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
-func (noopMetrics) IncContainerObjectsCount(string, string) {}
-func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
-func (noopMetrics) IncRefillObjectsCount(string, int, bool) {}
-func (noopMetrics) SetRefillPercent(string, uint32) {}
-func (noopMetrics) SetRefillStatus(string, string) {}
-func (noopMetrics) SetEvacuationInProgress(bool) {}
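
Dropping this file removes the always-present no-op writer, so the shard returns to nil-checking its optional metrics writer at each call site, as the id.go and mode.go hunks in this diff show. The two styles side by side, as a small sketch with simplified stand-in types:

package main

type MetricsWriter interface{ SetShardID(id string) }

type noopMetrics struct{}

func (noopMetrics) SetShardID(string) {}

type cfg struct{ metricsWriter MetricsWriter }

// Style this diff restores: guard every call site against nil.
func (c *cfg) setShardIDGuarded(id string) {
	if c.metricsWriter != nil {
		c.metricsWriter.SetShardID(id)
	}
}

// Style this diff removes: substitute a no-op writer once, call freely.
func newCfg(w MetricsWriter) *cfg {
	if w == nil {
		w = noopMetrics{}
	}
	return &cfg{metricsWriter: w}
}

func main() {
	newCfg(nil).metricsWriter.SetShardID("shard-1") // safe: no-op writer
	(&cfg{}).setShardIDGuarded("shard-2")           // safe: guarded call
}
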
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 5230dcad0..01a85da97 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -17,7 +17,6 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
@@ -35,7 +34,7 @@ type metricsStore struct {
refillStatus string
}
-func newMetricStore() *metricsStore {
+func NewMetricStore() *metricsStore {
return &metricsStore{
objCounters: map[string]uint64{
"phy": 0,
@@ -201,11 +200,11 @@ func TestCounters(t *testing.T) {
dir := t.TempDir()
sh, mm := shardWithMetrics(t, dir)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
- sh.SetMode(context.Background(), mode.ReadOnly)
+ sh.SetMode(mode.ReadOnly)
require.Equal(t, mode.ReadOnly, mm.mode)
- sh.SetMode(context.Background(), mode.ReadWrite)
+ sh.SetMode(mode.ReadWrite)
require.Equal(t, mode.ReadWrite, mm.mode)
const objNumber = 10
@@ -309,19 +308,17 @@ func TestCounters(t *testing.T) {
t.Run("inhume_TS", func(t *testing.T) {
var prm InhumePrm
+ ts := objectcore.AddressOf(testutil.GenerateObject())
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
custom := mm.getObjectCounter(user)
inhumedNumber := int(phy / 4)
- for _, o := range addrFromObjs(oo[:inhumedNumber]) {
- ts := oidtest.Address()
- ts.SetContainer(o.Container())
- prm.SetTarget(ts, o)
- _, err := sh.Inhume(context.Background(), prm)
- require.NoError(t, err)
- }
+ prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
+
+ _, err := sh.Inhume(context.Background(), prm)
+ require.NoError(t, err)
for i := range inhumedNumber {
cid, ok := oo[i].ContainerID()
@@ -404,7 +401,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
}),
}
- mm := newMetricStore()
+ mm := NewMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 901528976..1bab57448 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,8 +1,6 @@
package shard
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -20,21 +18,19 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode")
//
// Returns any error encountered that did not allow
// setting shard mode.
-func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error {
+func (s *Shard) SetMode(m mode.Mode) error {
unlock := s.lockExclusive()
defer unlock()
- return s.setMode(ctx, m)
+ return s.setMode(m)
}
-func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
- s.log.Info(ctx, logs.ShardSettingShardMode,
+func (s *Shard) setMode(m mode.Mode) error {
+ s.log.Info(logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
- components := []interface {
- SetMode(context.Context, mode.Mode) error
- }{
+ components := []interface{ SetMode(mode.Mode) error }{
s.metaBase, s.blobStor,
}
@@ -62,16 +58,18 @@ func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
if !m.Disabled() {
for i := range components {
- if err := components[i].SetMode(ctx, m); err != nil {
+ if err := components[i].SetMode(m); err != nil {
return err
}
}
}
s.info.Mode = m
- s.metricsWriter.SetMode(s.info.Mode)
+ if s.metricsWriter != nil {
+ s.metricsWriter.SetMode(s.info.Mode)
+ }
- s.log.Info(ctx, logs.ShardShardModeSetSuccessfully,
+ s.log.Info(logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index f8cb00a31..d7a9e7012 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -17,8 +17,7 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
- indexAttributes bool
+ obj *objectSDK.Object
}
// PutRes groups the resulting values of Put operation.
@@ -29,10 +28,6 @@ func (p *PutPrm) SetObject(obj *objectSDK.Object) {
p.obj = obj
}
-func (p *PutPrm) SetIndexAttributes(v bool) {
- p.indexAttributes = v
-}
-
// Put saves the object in shard.
//
// Returns any error encountered that
@@ -67,12 +62,6 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var res common.PutRes
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return PutRes{}, err
- }
- defer release()
-
// existence checks are not performed here, these checks should be executed
// ahead of `Put` by the storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
@@ -81,13 +70,13 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
}
if err != nil || !tryCache {
if err != nil {
- s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
- zap.Error(err))
+ s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+ zap.String("err", err.Error()))
}
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
- return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
+ return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
}
}
@@ -95,12 +84,11 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
- pPrm.SetIndexAttributes(prm.indexAttributes)
res, err := s.metaBase.Put(ctx, pPrm)
if err != nil {
// may we need to handle this case in a special way
// since the object has been successfully written to BlobStor
- return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
+ return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
}
if res.Inserted {
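The Put path above tries the write-cache first when it is usable, falls back to blobstor on failure, and only then records the object in the metabase. A simplified sketch of that fallback order, with hypothetical store interfaces standing in for the real writecache/blobstor/metabase types:

```go
package main

import (
	"errors"
	"fmt"
)

type store interface{ Put(obj string) error }

type named struct {
	name string
	fail bool
}

func (n *named) Put(obj string) error {
	if n.fail {
		return errors.New(n.name + " unavailable")
	}
	fmt.Println("stored in", n.name)
	return nil
}

func put(obj string, wc, blob, meta store, tryCache bool) error {
	var err error
	if tryCache {
		err = wc.Put(obj)
	}
	if !tryCache || err != nil {
		if err != nil {
			fmt.Println("can't put to write-cache, trying blobstor:", err)
		}
		if err = blob.Put(obj); err != nil {
			return fmt.Errorf("could not put object to BLOB storage: %w", err)
		}
	}
	if err = meta.Put(obj); err != nil {
		return fmt.Errorf("could not put object to metabase: %w", err)
	}
	return nil
}

func main() {
	_ = put("obj", &named{name: "write-cache", fail: true},
		&named{name: "blobstor"}, &named{name: "metabase"}, true)
}
```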
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 443689104..701268820 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -131,12 +131,6 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
return obj, nil
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return RngRes{}, err
- }
- defer release()
-
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
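The hunks in this file and the ones that follow repeatedly strip the same acquire/defer-release pattern from read and write paths. A minimal sketch of such a limiter with assumed semantics (the real qos.Limiter lives in frostfs-qos and is not reproduced here):

```go
package main

import (
	"context"
	"fmt"
)

type ReleaseFunc func()

type limiter struct{ sem chan struct{} }

func newLimiter(n int) *limiter { return &limiter{sem: make(chan struct{}, n)} }

// ReadRequest blocks until a slot is free or the context is canceled,
// returning a release callback the caller is expected to defer.
func (l *limiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	select {
	case l.sem <- struct{}{}:
		return func() { <-l.sem }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	l := newLimiter(1)
	release, err := l.ReadRequest(context.Background())
	if err != nil {
		fmt.Println("request rejected:", err)
		return
	}
	defer release()
	fmt.Println("request admitted")
}
```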
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index 06fe9f511..cc73db316 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -79,8 +79,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -95,7 +94,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
}),
},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 20f1f2b6f..0d83caa0c 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -6,13 +6,10 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -21,9 +18,37 @@ import (
var ErrRebuildInProgress = errors.New("shard rebuild in progress")
+type RebuildWorkerLimiter interface {
+ AcquireWorkSlot(ctx context.Context) error
+ ReleaseWorkSlot()
+}
+
+type rebuildLimiter struct {
+ semaphore chan struct{}
+}
+
+func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter {
+ return &rebuildLimiter{
+ semaphore: make(chan struct{}, workersCount),
+ }
+}
+
+func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
+ select {
+ case l.semaphore <- struct{}{}:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (l *rebuildLimiter) ReleaseWorkSlot() {
+ <-l.semaphore
+}
+
type rebuildTask struct {
- concurrencyLimiter common.RebuildLimiter
- fillPercent int
+ limiter RebuildWorkerLimiter
+ fillPercent int
}
type rebuilder struct {
@@ -63,37 +88,36 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D
if !ok {
continue
}
- runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter)
+ runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter)
}
}
}()
}
func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
- fillPercent int, concLimiter common.RebuildLimiter,
+ fillPercent int, limiter RebuildWorkerLimiter,
) {
select {
case <-ctx.Done():
return
default:
}
- log.Info(ctx, logs.BlobstoreRebuildStarted)
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil {
- log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
+ log.Info(logs.BlobstoreRebuildStarted)
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
+ log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
} else {
- log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
+ log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
}
}
-func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int,
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int,
) error {
select {
case <-ctx.Done():
return ctx.Err()
case r.tasks <- rebuildTask{
- concurrencyLimiter: limiter,
- fillPercent: fillPercent,
+ limiter: limiter,
+ fillPercent: fillPercent,
}:
return nil
default:
@@ -101,7 +125,7 @@ func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildL
}
}
-func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
+func (r *rebuilder) Stop(log *logger.Logger) {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -114,7 +138,7 @@ func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
r.wg.Wait()
r.cancel = nil
r.done = nil
- log.Info(ctx, logs.BlobstoreRebuildStopped)
+ log.Info(logs.BlobstoreRebuildStopped)
}
var errMBIsNotAvailable = errors.New("metabase is not available")
@@ -142,7 +166,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres
}
type RebuildPrm struct {
- ConcurrencyLimiter common.ConcurrencyLimiter
+ ConcurrencyLimiter RebuildWorkerLimiter
TargetFillPercent uint32
}
@@ -164,30 +188,5 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
return ErrDegradedMode
}
- limiter := &rebuildLimiter{
- concurrencyLimiter: p.ConcurrencyLimiter,
- rateLimiter: s.opsLimiter,
- }
- return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent))
-}
-
-var _ common.RebuildLimiter = (*rebuildLimiter)(nil)
-
-type rebuildLimiter struct {
- concurrencyLimiter common.ConcurrencyLimiter
- rateLimiter qos.Limiter
-}
-
-func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
- return r.concurrencyLimiter.AcquireWorkSlot(ctx)
-}
-
-func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) {
- release, err := r.rateLimiter.ReadRequest(ctx)
- return common.ReleaseFunc(release), err
-}
-
-func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) {
- release, err := r.rateLimiter.WriteRequest(ctx)
- return common.ReleaseFunc(release), err
+ return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent))
}
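The semaphore-backed RebuildWorkerLimiter introduced above replaces the common.RebuildLimiter indirection with a plain buffered channel. A runnable sketch of how a caller might bound rebuild concurrency with it (the batch loop and IDs are hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

type RebuildWorkerLimiter interface {
	AcquireWorkSlot(ctx context.Context) error
	ReleaseWorkSlot()
}

type rebuildLimiter struct{ semaphore chan struct{} }

func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter {
	return &rebuildLimiter{semaphore: make(chan struct{}, workersCount)}
}

func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
	select {
	case l.semaphore <- struct{}{}: // take a slot, or...
		return nil
	case <-ctx.Done(): // ...give up when the caller is canceled
		return ctx.Err()
	}
}

func (l *rebuildLimiter) ReleaseWorkSlot() { <-l.semaphore }

func main() {
	lim := NewRebuildLimiter(2) // at most two rebuild workers in flight
	var wg sync.WaitGroup
	for id := 0; id < 5; id++ {
		if err := lim.AcquireWorkSlot(context.Background()); err != nil {
			break
		}
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer lim.ReleaseWorkSlot()
			fmt.Println("rebuilding batch", id)
		}(id)
	}
	wg.Wait()
}
```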
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
index d90343265..0025bb45a 100644
--- a/pkg/local_object_storage/shard/refill_test.go
+++ b/pkg/local_object_storage/shard/refill_test.go
@@ -34,7 +34,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)},
})
- defer func() { require.NoError(b, sh.Close(context.Background())) }()
+ defer func() { require.NoError(b, sh.Close()) }()
var putPrm PutPrm
@@ -61,7 +61,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, err)
}
- require.NoError(b, sh.Close(context.Background()))
+ require.NoError(b, sh.Close())
require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path))
require.NoError(b, sh.Open(context.Background()))
@@ -72,5 +72,5 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, sh.Init(context.Background()))
- require.NoError(b, sh.Close(context.Background()))
+ require.NoError(b, sh.Close())
}
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index e563f390b..7dacbfa6c 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -51,7 +51,7 @@ func TestShardReload(t *testing.T) {
WithMetaBaseOptions(metaOpts...),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama"))),
- WithMetricsWriter(newMetricStore()),
+ WithMetricsWriter(NewMetricStore()),
}
sh := New(opts...)
@@ -59,7 +59,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, sh.Init(context.Background()))
defer func() {
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}()
objects := make([]objAddr, 5)
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index fbc751e26..1615f5fbe 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -15,9 +15,8 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
- isIndexedContainer bool
+ cnr cid.ID
+ filters objectSDK.SearchFilters
}
// SelectRes groups the resulting values of Select operation.
@@ -26,9 +25,8 @@ type SelectRes struct {
}
// SetContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) {
+func (p *SelectPrm) SetContainerID(cnr cid.ID) {
p.cnr = cnr
- p.isIndexedContainer = isIndexedContainer
}
// SetFilters is a Select option to set the object filters.
@@ -60,20 +58,13 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
return SelectRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return SelectRes{}, nil
- }
- defer release()
-
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
- selectPrm.SetUseAttributeIndex(prm.isIndexedContainer)
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
- return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
+ return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
}
return SelectRes{
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index d89b56266..7496fc352 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -7,8 +7,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -59,6 +57,50 @@ type DeletedLockCallback func(context.Context, []oid.Address)
// EmptyContainersCallback is a callback handling list of zero-size and zero-count containers.
type EmptyContainersCallback func(context.Context, []cid.ID)
+// MetricsWriter is an interface for recording the shard's metrics.
+type MetricsWriter interface {
+ // SetObjectCounter must set object counter taking into account object type.
+ SetObjectCounter(objectType string, v uint64)
+ // AddToObjectCounter must update object counter taking into account object
+ // type.
+ // Negative parameter must decrease the counter.
+ AddToObjectCounter(objectType string, delta int)
+ // AddToContainerSize must add a value to the container size.
+ // Value can be negative.
+ AddToContainerSize(cnr string, value int64)
+ // AddToPayloadSize must add a value to the payload size.
+ // Value can be negative.
+ AddToPayloadSize(value int64)
+ // IncObjectCounter must increment shard's object counter taking into account
+ // object type.
+ IncObjectCounter(objectType string)
+ // SetShardID must set (update) the shard identifier that will be used in
+ // metrics.
+ SetShardID(id string)
+ // SetMode must set shard mode.
+ SetMode(mode mode.Mode)
+ // IncErrorCounter increments the error counter.
+ IncErrorCounter()
+ // ClearErrorCounter clears the error counter.
+ ClearErrorCounter()
+ // DeleteShardMetrics deletes shard metrics from registry.
+ DeleteShardMetrics()
+ // SetContainerObjectsCount sets container object count.
+ SetContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncContainerObjectsCount increments container object count.
+ IncContainerObjectsCount(cnrID string, objectType string)
+ // SubContainerObjectsCount subtracts container object count.
+ SubContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncRefillObjectsCount increments refill objects count.
+ IncRefillObjectsCount(path string, size int, success bool)
+ // SetRefillPercent sets refill percent.
+ SetRefillPercent(path string, percent uint32)
+ // SetRefillStatus sets refill status.
+ SetRefillStatus(path string, status string)
+ // SetEvacuationInProgress sets evacuation status.
+ SetEvacuationInProgress(value bool)
+}
+
type cfg struct {
m sync.RWMutex
@@ -96,23 +138,17 @@ type cfg struct {
metricsWriter MetricsWriter
- reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
-
- containerInfo container.InfoProvider
-
- opsLimiter qos.Limiter
+ reportErrorFunc func(selfID string, message string, err error)
}
func defaultCfg() *cfg {
return &cfg{
rmBatchSize: 100,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
- reportErrorFunc: func(context.Context, string, string, error) {},
+ reportErrorFunc: func(string, string, error) {},
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
- metricsWriter: noopMetrics{},
- opsLimiter: qos.NewNoopLimiter(),
}
}
@@ -134,8 +170,8 @@ func New(opts ...Option) *Shard {
tsSource: c.tsSource,
}
- reportFunc := func(ctx context.Context, msg string, err error) {
- s.reportErrorFunc(ctx, s.ID().String(), msg, err)
+ reportFunc := func(msg string, err error) {
+ s.reportErrorFunc(s.ID().String(), msg, err)
}
s.blobStor.SetReportErrorFunc(reportFunc)
@@ -205,7 +241,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option {
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
- c.gcCfg.log = l.WithTag(logger.TagGC)
+ c.gcCfg.log = l
}
}
@@ -218,7 +254,7 @@ func WithWriteCache(use bool) Option {
// hasWriteCache reports whether the write-cache is enabled on the shard.
func (s *Shard) hasWriteCache() bool {
- return s.useWriteCache
+ return s.cfg.useWriteCache
}
// NeedRefillMetabase returns true if metabase is needed to be refilled.
@@ -321,7 +357,7 @@ func WithGCMetrics(v GCMectrics) Option {
// WithReportErrorFunc returns option to specify callback for handling storage-related errors
// in the background workers.
-func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
+func WithReportErrorFunc(f func(selfID string, message string, err error)) Option {
return func(c *cfg) {
c.reportErrorFunc = f
}
@@ -365,29 +401,16 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
-// WithContainerInfoProvider returns option to set container info provider.
-func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
- return func(c *cfg) {
- c.containerInfo = containerInfo
- }
-}
-
-func WithLimiter(l qos.Limiter) Option {
- return func(c *cfg) {
- c.opsLimiter = l
- }
-}
-
func (s *Shard) fillInfo() {
- s.info.MetaBaseInfo = s.metaBase.DumpInfo()
- s.info.BlobStorInfo = s.blobStor.DumpInfo()
- s.info.Mode = s.GetMode()
+ s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
+ s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
+ s.cfg.info.Mode = s.GetMode()
- if s.useWriteCache {
- s.info.WriteCacheInfo = s.writeCache.DumpInfo()
+ if s.cfg.useWriteCache {
+ s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
}
if s.pilorama != nil {
- s.info.PiloramaInfo = s.pilorama.DumpInfo()
+ s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
}
}
@@ -405,13 +428,13 @@ const (
)
func (s *Shard) updateMetrics(ctx context.Context) {
- if s.GetMode().NoMetabase() {
+ if s.cfg.metricsWriter == nil || s.GetMode().NoMetabase() {
return
}
cc, err := s.metaBase.ObjectCounters()
if err != nil {
- s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
+ s.log.Warn(logs.ShardMetaObjectCounterRead,
zap.Error(err),
)
@@ -424,7 +447,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
cnrList, err := s.metaBase.Containers(ctx)
if err != nil {
- s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
+ s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
return
}
@@ -433,7 +456,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
for i := range cnrList {
size, err := s.metaBase.ContainerSize(cnrList[i])
if err != nil {
- s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
+ s.log.Warn(logs.ShardMetaCantReadContainerSize,
zap.String("cid", cnrList[i].EncodeToString()),
zap.Error(err))
continue
@@ -446,7 +469,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
contCount, err := s.metaBase.ContainerCounters(ctx)
if err != nil {
- s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
+ s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err))
return
}
for contID, count := range contCount.Counts {
@@ -454,69 +477,95 @@ func (s *Shard) updateMetrics(ctx context.Context) {
s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
}
- s.metricsWriter.SetMode(s.info.Mode)
+ s.cfg.metricsWriter.SetMode(s.info.Mode)
}
// incObjectCounter increment both physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
- s.metricsWriter.IncObjectCounter(physical)
- s.metricsWriter.IncObjectCounter(logical)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
- if isUser {
- s.metricsWriter.IncObjectCounter(user)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.IncObjectCounter(physical)
+ s.cfg.metricsWriter.IncObjectCounter(logical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+ if isUser {
+ s.cfg.metricsWriter.IncObjectCounter(user)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
+ }
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
- if v > 0 {
- s.metricsWriter.AddToObjectCounter(typ, -int(v))
+ if s.cfg.metricsWriter != nil && v > 0 {
+ s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) setObjectCounterBy(typ string, v uint64) {
- if v > 0 {
- s.metricsWriter.SetObjectCounter(typ, v)
+ if s.cfg.metricsWriter != nil && v > 0 {
+ s.cfg.metricsWriter.SetObjectCounter(typ, v)
}
}
func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
+ if s.cfg.metricsWriter == nil {
+ return
+ }
+
for cnrID, count := range byCnr {
if count.Phy > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
+ s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
}
if count.Logic > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
+ s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
}
if count.User > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
+ s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
}
}
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
- if size != 0 {
- s.metricsWriter.AddToContainerSize(cnr, size)
+ if s.cfg.metricsWriter != nil && size != 0 {
+ s.cfg.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
- if size != 0 {
- s.metricsWriter.AddToPayloadSize(size)
+ if s.cfg.metricsWriter != nil && size != 0 {
+ s.cfg.metricsWriter.AddToPayloadSize(size)
}
}
func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
- if v > 0 {
+ if s.cfg.metricsWriter != nil && v > 0 {
s.metricsWriter.SetContainerObjectsCount(cnr, typ, v)
}
}
+func (s *Shard) IncErrorCounter() {
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.IncErrorCounter()
+ }
+}
+
+func (s *Shard) ClearErrorCounter() {
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.ClearErrorCounter()
+ }
+}
+
+func (s *Shard) DeleteShardMetrics() {
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.DeleteShardMetrics()
+ }
+}
+
func (s *Shard) SetEvacuationInProgress(val bool) {
s.m.Lock()
defer s.m.Unlock()
s.info.EvacuationInProgress = val
- s.metricsWriter.SetEvacuationInProgress(val)
+ if s.metricsWriter != nil {
+ s.metricsWriter.SetEvacuationInProgress(val)
+ }
}
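This file reverts to guarding every metrics call with an explicit nil check instead of defaulting `metricsWriter` to a package-level noopMetrics (removed from defaultCfg above). A sketch of the nil-guard pattern, with illustrative types:

```go
package main

import "fmt"

type MetricsWriter interface{ IncErrorCounter() }

type printMetrics struct{}

func (printMetrics) IncErrorCounter() { fmt.Println("error counter +1") }

type shard struct {
	metricsWriter MetricsWriter // nil when metrics are disabled
}

func (s *shard) IncErrorCounter() {
	if s.metricsWriter != nil { // guard: the writer is optional
		s.metricsWriter.IncErrorCounter()
	}
}

func main() {
	(&shard{}).IncErrorCounter()                               // no-op: nil writer
	(&shard{metricsWriter: printMetrics{}}).IncErrorCounter() // recorded
}
```

The trade-off is visible in the hunks above: a noop default keeps call sites clean, while nil checks avoid the extra type but must be repeated at every metrics touch point.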
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index 84be71c4d..73ba2e82b 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -60,8 +60,7 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
{
Storage: blobovniczatree.NewBlobovniczaTree(
context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t)),
blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -90,8 +89,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
WithWriteCache(enableWriteCache),
WithWriteCacheOptions(o.wcOpts),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index b1232707f..de00eabd1 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -52,10 +52,10 @@ func TestWriteCacheObjectLoss(t *testing.T) {
})
}
require.NoError(t, errG.Wait())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ defer func() { require.NoError(t, sh.Close()) }()
var getPrm GetPrm
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index db361a8bd..26dc8ec1e 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -43,11 +43,6 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeMove(ctx, d, treeID, m)
}
@@ -80,11 +75,6 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
}
@@ -113,46 +103,9 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
}
-// TreeApplyBatch implements the pilorama.Forest interface.
-func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- if s.pilorama == nil {
- return ErrPiloramaDisabled
- }
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return ErrReadOnlyMode
- }
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
- return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m)
-}
-
// TreeGetByPath implements the pilorama.Forest interface.
func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath",
@@ -177,11 +130,6 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
}
@@ -207,11 +155,6 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
if s.info.Mode.NoMetabase() {
return pilorama.Meta{}, 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return pilorama.Meta{}, 0, err
- }
- defer release()
return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
}
@@ -237,16 +180,11 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
}
// TreeSortedByFilename implements the pilorama.Forest interface.
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
+func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
@@ -266,11 +204,6 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID
if s.info.Mode.NoMetabase() {
return nil, last, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, last, err
- }
- defer release()
return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
}
@@ -296,11 +229,6 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return pilorama.Move{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return pilorama.Move{}, err
- }
- defer release()
return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
}
@@ -325,11 +253,6 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeDrop(ctx, cid, treeID)
}
@@ -353,11 +276,6 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeList(ctx, cid)
}
@@ -381,11 +299,6 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
return s.pilorama.TreeHeight(ctx, cid, treeID)
}
@@ -410,11 +323,6 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b
if s.info.Mode.NoMetabase() {
return false, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
- defer release()
return s.pilorama.TreeExists(ctx, cid, treeID)
}
@@ -443,11 +351,6 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
}
@@ -472,11 +375,6 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st
if s.info.Mode.NoMetabase() {
return 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
}
@@ -498,11 +396,6 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeListTrees(ctx, prm)
}
@@ -532,10 +425,5 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
}
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index 9edb89df8..a6de07f03 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -67,12 +67,6 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
}
@@ -130,19 +124,12 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
close(started)
defer cleanup()
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
- return
- }
- defer release()
-
- s.log.Info(ctx, logs.StartedWritecacheSealAsync)
+ s.log.Info(logs.StartedWritecacheSealAsync)
if err := s.writeCache.Seal(ctx, prm); err != nil {
- s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
+ s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err))
return
}
- s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
+ s.log.Info(logs.WritecacheSealCompletedAsync)
}()
select {
case <-ctx.Done():
@@ -151,11 +138,5 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
return nil
}
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
return s.writeCache.Seal(ctx, prm)
}
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index fd85b4501..79ab7d9c6 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -43,12 +43,12 @@ func BenchmarkWriteAfterDelete(b *testing.B) {
b.SetParallelism(parallel)
benchmarkRunPar(b, cache, payloadSize)
})
- require.NoError(b, cache.Close(context.Background()))
+ require.NoError(b, cache.Close())
}
func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close(context.Background())) }()
+ defer func() { require.NoError(b, cache.Close()) }()
ctx := context.Background()
objGen := testutil.RandObjGenerator{ObjSize: size}
@@ -71,7 +71,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close(context.Background())) }()
+ defer func() { require.NoError(b, cache.Close()) }()
benchmarkRunPar(b, cache, size)
}
@@ -100,7 +100,7 @@ func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
- require.NoError(b, cache.Init(context.Background()), "initializing")
+ require.NoError(b, cache.Init(), "initializing")
}
type testMetabase struct{}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index ee709ea73..b97fc5856 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -6,7 +6,6 @@ import (
"sync"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -56,13 +55,12 @@ func New(opts ...Option) Cache {
counter: fstree.NewSimpleCounter(),
options: options{
- log: logger.NewLoggerWrapper(zap.NewNop()),
+ log: &logger.Logger{Logger: zap.NewNop()},
maxObjectSize: defaultMaxObjectSize,
workersCount: defaultFlushWorkersCount,
maxCacheSize: defaultMaxCacheSize,
metrics: DefaultMetrics(),
flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
- qosLimiter: qos.NewNoopLimiter(),
},
}
@@ -96,24 +94,23 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error {
if err != nil {
return metaerr.Wrap(err)
}
- c.initCounters()
- return nil
+ return metaerr.Wrap(c.initCounters())
}
// Init runs necessary services.
-func (c *cache) Init(ctx context.Context) error {
+func (c *cache) Init() error {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
- if err := c.flushAndDropBBoltDB(ctx); err != nil {
+ if err := c.flushAndDropBBoltDB(context.Background()); err != nil {
return fmt.Errorf("flush previous version write-cache database: %w", err)
}
- ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache
+ ctx, cancel := context.WithCancel(context.Background())
c.cancel.Store(cancel)
c.runFlushLoop(ctx)
return nil
}
// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
-func (c *cache) Close(ctx context.Context) error {
+func (c *cache) Close() error {
if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil {
cancelValue.(context.CancelFunc)()
}
@@ -130,7 +127,7 @@ func (c *cache) Close(ctx context.Context) error {
var err error
if c.fsTree != nil {
- err = c.fsTree.Close(ctx)
+ err = c.fsTree.Close()
if err != nil {
c.fsTree = nil
}
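The cache stores its flush-loop cancel function in an atomic.Value: Init stores the real cancel, and Close swaps in a dummy, so Close is idempotent and never races Init. A sketch of that mechanism with simplified types (`dummyCanceler` mirrors the name used in the hunk above):

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

var dummyCanceler context.CancelFunc = func() {}

type cache struct{ cancel atomic.Value }

func (c *cache) Init() {
	ctx, cancel := context.WithCancel(context.Background())
	c.cancel.Store(cancel)
	go func() {
		<-ctx.Done()
		fmt.Println("flush loop stopped")
	}()
}

func (c *cache) Close() {
	if v := c.cancel.Swap(dummyCanceler); v != nil {
		v.(context.CancelFunc)() // runs the real cancel at most once
	}
}

func main() {
	c := &cache{}
	c.Init()
	c.Close()
	c.Close() // safe: the second call only invokes the dummy
	time.Sleep(10 * time.Millisecond)
}
```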
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index 94a0a40db..dda284439 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -46,7 +46,7 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
storageType = StorageTypeFSTree
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
- storagelog.Write(ctx, c.log,
+ storagelog.Write(c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 893d27ba2..bfa6aacb0 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -6,7 +6,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -15,7 +14,6 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -37,7 +35,6 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String())
fl := newFlushLimiter(c.flushSizeLimit)
c.wg.Add(1)
go func() {
@@ -67,13 +64,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
continue
}
- release, err := c.qosLimiter.ReadRequest(ctx)
- if err != nil {
- c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err))
- c.modeMtx.RUnlock()
- continue
- }
- err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
if err := fl.acquire(oi.DataSize); err != nil {
return err
}
@@ -88,15 +79,11 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
return ctx.Err()
}
})
- release()
if err != nil {
- c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+ c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
}
c.modeMtx.RUnlock()
-
- // counter changed by fstree
- c.estimateCacheSize()
case <-ctx.Done():
return
}
@@ -120,18 +107,12 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
defer fl.release(objInfo.size)
- release, err := c.qosLimiter.WriteRequest(ctx)
- if err != nil {
- c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err))
- return
- }
- defer release()
res, err := c.fsTree.Get(ctx, common.GetPrm{
Address: objInfo.addr,
})
if err != nil {
if !client.IsErrObjectNotFound(err) {
- c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
}
return
}
@@ -145,11 +126,11 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
}
-func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
+func (c *cache) reportFlushError(msg string, addr string, err error) {
if c.reportError != nil {
- c.reportError(ctx, msg, err)
+ c.reportError(msg, err)
} else {
- c.log.Error(ctx, msg,
+ c.log.Error(msg,
zap.String("address", addr),
zap.Error(err))
}
@@ -164,7 +145,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var obj objectSDK.Object
err := obj.Unmarshal(e.ObjectData)
if err != nil {
- c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
+ c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
if ignoreErrors {
return nil
}
@@ -202,7 +183,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
- c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor,
+ c.reportFlushError(logs.FSTreeCantFushObjectBlobstor,
addr.EncodeToString(), err)
}
return err
@@ -214,7 +195,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
_, err = c.metabase.UpdateStorageID(ctx, updPrm)
if err != nil {
- c.reportFlushError(ctx, logs.FSTreeCantUpdateID,
+ c.reportFlushError(logs.FSTreeCantUpdateID,
addr.EncodeToString(), err)
}
return err
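The reportFlushError fallback visible above prefers the injected callback and only logs locally when none was provided. A sketch of that shape, with the logger and types simplified to the standard library:

```go
package main

import (
	"errors"
	"log"
)

type cache struct {
	reportError func(msg string, err error) // optional, nil when not injected
}

func (c *cache) reportFlushError(msg, addr string, err error) {
	if c.reportError != nil {
		c.reportError(msg, err) // let the shard count and classify the error
	} else {
		log.Printf("%s: address=%s: %v", msg, addr, err)
	}
}

func main() {
	c := &cache{}
	c.reportFlushError("can't flush an object to blobstor", "cid/oid", errors.New("disk error"))
}
```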
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 7fc84657c..59a4e4895 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -38,9 +38,9 @@ func TestFlush(t *testing.T) {
errCountOpt := func() (Option, *atomic.Uint32) {
cnt := &atomic.Uint32{}
- return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
+ return WithReportErrorFunc(func(msg string, err error) {
cnt.Add(1)
- testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
}), cnt
}
@@ -114,11 +114,11 @@ func runFlushTest[Option any](
) {
t.Run("no errors", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
require.NoError(t, wc.Flush(context.Background(), false, false))
@@ -127,15 +127,15 @@ func runFlushTest[Option any](
t.Run("flush on moving to degraded mode", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
// Blobstor is read-only, so we expect an error from `flush` here.
- require.Error(t, wc.SetMode(context.Background(), mode.Degraded))
+ require.Error(t, wc.SetMode(mode.Degraded))
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.SetMode(context.Background(), mode.Degraded))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
+ require.NoError(t, wc.SetMode(mode.Degraded))
check(t, mb, bs, objects)
})
@@ -145,12 +145,12 @@ func runFlushTest[Option any](
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
f.InjectFn(t, wc)
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
require.Equal(t, uint32(0), errCount.Load())
require.Error(t, wc.Flush(context.Background(), false, false))
@@ -173,7 +173,7 @@ func newCache[Option any](
meta.WithPath(filepath.Join(dir, "meta")),
meta.WithEpochState(dummyEpoch{}))
require.NoError(t, mb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.Init(context.Background()))
+ require.NoError(t, mb.Init())
bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -184,15 +184,15 @@ func newCache[Option any](
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init(context.Background()))
+ require.NoError(t, bs.Init())
wc := createCacheFn(t, mb, bs, opts...)
require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.Init(context.Background()))
+ require.NoError(t, wc.Init())
// First set mode for metabase and blobstor to prevent background flushes.
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, mb.SetMode(mode.ReadOnly))
+ require.NoError(t, bs.SetMode(mode.ReadOnly))
return wc, bs, mb
}
diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go
index e369fbd50..9ec039f91 100644
--- a/pkg/local_object_storage/writecache/iterate.go
+++ b/pkg/local_object_storage/writecache/iterate.go
@@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
return b.ForEach(func(k, _ []byte) error {
err := addr.DecodeString(string(k))
if err != nil {
- return fmt.Errorf("parse object address: %w", err)
+ return fmt.Errorf("could not parse object address: %w", err)
}
return f(addr)
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
index 0e020b36e..ddc4101be 100644
--- a/pkg/local_object_storage/writecache/limiter.go
+++ b/pkg/local_object_storage/writecache/limiter.go
@@ -3,8 +3,6 @@ package writecache
import (
"errors"
"sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
var errLimiterClosed = errors.New("acquire failed: limiter closed")
@@ -47,11 +45,17 @@ func (l *flushLimiter) release(size uint64) {
l.cond.L.Lock()
defer l.cond.L.Unlock()
- assert.True(l.size >= size, "flushLimiter: invalid size")
- l.size -= size
+ if l.size >= size {
+ l.size -= size
+ } else {
+ panic("flushLimiter: invalid size")
+ }
- assert.True(l.count > 0, "flushLimiter: invalid count")
- l.count--
+ if l.count > 0 {
+ l.count--
+ } else {
+ panic("flushLimiter: invalid count")
+ }
l.cond.Broadcast()
}
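Only the release side of flushLimiter appears in this hunk; the assumed shape of the matching acquire is sketched below (not part of this patch). It blocks while the projected total exceeds the limit, letting a single oversized object through when nothing else is in flight, which is why release can assert its invariants with a panic:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type flushLimiter struct {
	count, size uint64
	limit       uint64
	cond        *sync.Cond
	closed      bool
}

func newFlushLimiter(limit uint64) *flushLimiter {
	return &flushLimiter{limit: limit, cond: sync.NewCond(&sync.Mutex{})}
}

func (l *flushLimiter) acquire(size uint64) error {
	l.cond.L.Lock()
	defer l.cond.L.Unlock()
	// Wait unless the limiter is idle: an oversized object may pass alone.
	for !l.closed && l.count > 0 && l.size+size > l.limit {
		l.cond.Wait()
	}
	if l.closed {
		return errors.New("acquire failed: limiter closed")
	}
	l.size += size
	l.count++
	return nil
}

func (l *flushLimiter) release(size uint64) {
	l.cond.L.Lock()
	defer l.cond.L.Unlock()
	if l.size >= size {
		l.size -= size
	} else {
		panic("flushLimiter: invalid size") // mirrors the hunk above
	}
	if l.count > 0 {
		l.count--
	} else {
		panic("flushLimiter: invalid count")
	}
	l.cond.Broadcast()
}

func main() {
	l := newFlushLimiter(1 << 20)
	if err := l.acquire(512); err == nil {
		l.release(512)
	}
	fmt.Println("acquire/release balanced")
}
```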
diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go
index 1ca3e1156..db99b203a 100644
--- a/pkg/local_object_storage/writecache/limiter_test.go
+++ b/pkg/local_object_storage/writecache/limiter_test.go
@@ -14,7 +14,7 @@ func TestLimiter(t *testing.T) {
l := newFlushLimiter(uint64(maxSize))
var currSize atomic.Int64
var eg errgroup.Group
- for range 10_000 {
+ for i := 0; i < 10_000; i++ {
eg.Go(func() error {
defer l.release(single)
defer currSize.Add(-1)
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index c491be60b..d12dd603b 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -23,8 +23,8 @@ type setModePrm struct {
// SetMode sets write-cache mode of operation.
// When shard is put in read-only mode all objects in memory are flushed to disk
// and all background jobs are suspended.
-func (c *cache) SetMode(ctx context.Context, m mode.Mode) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode",
+func (c *cache) SetMode(m mode.Mode) error {
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode",
trace.WithAttributes(
attribute.String("mode", m.String()),
))
@@ -60,7 +60,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
// guarantees that there are no in-fly operations.
for len(c.flushCh) != 0 {
- c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush)
+ c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
time.Sleep(time.Second)
}
@@ -82,8 +82,8 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
return nil
}
if !shrink {
- if err := c.fsTree.Close(ctx); err != nil {
- return fmt.Errorf("close write-cache storage: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
return nil
}
@@ -98,19 +98,19 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
if errors.Is(err, errIterationCompleted) {
empty = false
} else {
- return fmt.Errorf("check write-cache items: %w", err)
+ return fmt.Errorf("failed to check write-cache items: %w", err)
}
}
- if err := c.fsTree.Close(ctx); err != nil {
- return fmt.Errorf("close write-cache storage: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
if empty {
err := os.RemoveAll(c.path)
if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("remove write-cache files: %w", err)
+ return fmt.Errorf("failed to remove write-cache files: %w", err)
}
} else {
- c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
+ c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
}
return nil
}
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
index 4fbadbc64..70cfe8382 100644
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -18,13 +18,13 @@ func TestMode(t *testing.T) {
require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init(context.Background()))
+ require.NoError(t, wc.Init())
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close(context.Background()))
+ require.NoError(t, wc.Close())
require.NoError(t, wc.Open(context.Background(), mode.Degraded))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init(context.Background()))
+ require.NoError(t, wc.Init())
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close(context.Background()))
+ require.NoError(t, wc.Close())
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index a4f98ad06..66ac7805c 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,10 +1,8 @@
package writecache
import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
// Option represents write-cache configuration option.
@@ -31,21 +29,19 @@ type options struct {
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
- reportError func(context.Context, string, error)
+ reportError func(string, error)
// metrics is metrics implementation
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
disableBackgroundFlush bool
// flushSizeLimit is total size of flushing objects.
flushSizeLimit uint64
- // qosLimiter used to limit flush RPS.
- qosLimiter qos.Limiter
}
// WithLogger sets logger.
func WithLogger(log *logger.Logger) Option {
return func(o *options) {
- o.log = log
+ o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
}
}
@@ -112,7 +108,7 @@ func WithNoSync(noSync bool) Option {
}
// WithReportErrorFunc sets error reporting function.
-func WithReportErrorFunc(f func(context.Context, string, error)) Option {
+func WithReportErrorFunc(f func(string, error)) Option {
return func(o *options) {
o.reportError = f
}
@@ -138,9 +134,3 @@ func WithFlushSizeLimit(v uint64) Option {
o.flushSizeLimit = v
}
}
-
-func WithQoSLimiter(l qos.Limiter) Option {
- return func(o *options) {
- o.qosLimiter = l
- }
-}
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index 2fbf50913..c53067bea 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -2,7 +2,6 @@ package writecache
import (
"context"
- "fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -60,15 +59,7 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
// putBig writes object to FSTree and pushes it to the flush workers queue.
func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
- if prm.RawData == nil { // foolproof: RawData should be marshalled by shard.
- data, err := prm.Object.Marshal()
- if err != nil {
- return fmt.Errorf("cannot marshal object: %w", err)
- }
- prm.RawData = data
- }
- size := uint64(len(prm.RawData))
- if !c.hasEnoughSpace(size) {
+ if !c.hasEnoughSpaceFS() {
return ErrOutOfSpace
}
@@ -77,7 +68,7 @@ func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
return err
}
- storagelog.Write(ctx, c.log,
+ storagelog.Write(c.log,
storagelog.AddressField(prm.Address.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree PUT"),
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index 7a52d3672..835686fbb 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -7,6 +7,10 @@ func (c *cache) estimateCacheSize() (uint64, uint64) {
return count, size
}
+func (c *cache) hasEnoughSpaceFS() bool {
+ return c.hasEnoughSpace(c.maxObjectSize)
+}
+
func (c *cache) hasEnoughSpace(objectSize uint64) bool {
count, size := c.estimateCacheSize()
if c.maxCacheCount > 0 && count+1 > c.maxCacheCount {
@@ -15,6 +19,7 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
return c.maxCacheSize >= size+objectSize
}
-func (c *cache) initCounters() {
+func (c *cache) initCounters() error {
c.estimateCacheSize()
+ return nil
}
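The new hasEnoughSpaceFS checks headroom for a worst-case object (`maxObjectSize`) rather than the actual payload size, so the FS-level admission is more conservative. An illustrative sketch of the difference (sizes are made up):

```go
package main

import "fmt"

const maxObjectSize uint64 = 64 << 20 // illustrative 64 MiB cap

func hasEnoughSpace(objectSize, used, maxCacheSize uint64) bool {
	return maxCacheSize >= used+objectSize
}

func main() {
	used, limit := uint64(200<<20), uint64(256<<20)
	// An exact check would admit this 32 MiB object...
	fmt.Println(hasEnoughSpace(32<<20, used, limit)) // true
	// ...but the FS-level check reserves a full worst-case slot.
	fmt.Println(hasEnoughSpace(maxObjectSize, used, limit)) // false
}
```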
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index e88566cdf..2e52e5b20 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
fstree.WithFileCounter(c.counter),
)
if err := c.fsTree.Open(mod); err != nil {
- return fmt.Errorf("open FSTree: %w", err)
+ return fmt.Errorf("could not open FSTree: %w", err)
}
if err := c.fsTree.Init(); err != nil {
- return fmt.Errorf("init FSTree: %w", err)
+ return fmt.Errorf("could not init FSTree: %w", err)
}
return nil
@@ -43,9 +43,9 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
if err != nil && !client.IsErrObjectNotFound(err) {
- c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
+ c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
} else if err == nil {
- storagelog.Write(ctx, c.log,
+ storagelog.Write(c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
index 5eb341ba4..3a100f1a3 100644
--- a/pkg/local_object_storage/writecache/upgrade.go
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
return nil
}
if err != nil {
- return fmt.Errorf("check write-cache database existence: %w", err)
+ return fmt.Errorf("could not check write-cache database existence: %w", err)
}
db, err := OpenDB(c.path, true, os.OpenFile)
if err != nil {
- return fmt.Errorf("open write-cache database: %w", err)
+ return fmt.Errorf("could not open write-cache database: %w", err)
}
defer func() {
_ = db.Close()
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index 7ed511318..a973df604 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -38,21 +38,21 @@ type Cache interface {
// Returns ErrReadOnly if the Cache is currently in the read-only mode.
Delete(context.Context, oid.Address) error
Put(context.Context, common.PutPrm) (common.PutRes, error)
- SetMode(context.Context, mode.Mode) error
+ SetMode(mode.Mode) error
SetLogger(*logger.Logger)
DumpInfo() Info
Flush(context.Context, bool, bool) error
Seal(context.Context, SealPrm) error
- Init(context.Context) error
+ Init() error
Open(ctx context.Context, mode mode.Mode) error
- Close(context.Context) error
+ Close() error
GetMetrics() Metrics
}
// MainStorage is the interface of the underlying storage of Cache implementations.
type MainStorage interface {
- Compressor() *compression.Compressor
+ Compressor() *compression.Config
Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error)
Put(context.Context, common.PutPrm) (common.PutRes, error)
}
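
The interface hunk above removes `context.Context` from the lifecycle methods (`Init`, `Close`, `SetMode`) while keeping it on the data path. A trimmed, hypothetical rendering of the resulting shape, with a no-op implementation to show what callers compile against:

```go
package main

import (
	"context"
	"fmt"
)

// Mode is a placeholder for the shard component mode type.
type Mode uint32

// Cache is a trimmed, hypothetical rendering of the interface after the
// revert: lifecycle methods drop the context, the data path keeps it.
type Cache interface {
	Open(ctx context.Context, m Mode) error
	Init() error
	SetMode(Mode) error
	Close() error
}

type noopCache struct{ mode Mode }

func (c *noopCache) Open(_ context.Context, m Mode) error { c.mode = m; return nil }
func (c *noopCache) Init() error                          { return nil }
func (c *noopCache) SetMode(m Mode) error                 { c.mode = m; return nil }
func (c *noopCache) Close() error                         { return nil }

func main() {
	var c Cache = &noopCache{}
	fmt.Println(c.Open(context.Background(), 1), c.Init(), c.SetMode(2), c.Close())
}
```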
diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go
index 2849f3052..b6718dea5 100644
--- a/pkg/morph/client/actor.go
+++ b/pkg/morph/client/actor.go
@@ -16,7 +16,7 @@ type actorProvider interface {
GetRPCActor() actor.RPCActor
}
-// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken.
+// Client switches an established connection with neo-go if it is broken.
// This leads to an invalidation of an rpc actor within Client. That means the
// components that are initialized with the rpc actor may unintentionally use
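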
// it when it is already invalidated. SwitchRPCGuardedActor is used to prevent
diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go
index 4462daab4..aae245acd 100644
--- a/pkg/morph/client/balance/balanceOf.go
+++ b/pkg/morph/client/balance/balanceOf.go
@@ -1,33 +1,36 @@
package balance
import (
- "context"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// BalanceOf receives the amount of funds in the client's account
// through the Balance contract call, and returns it.
-func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) {
- h := id.ScriptHash()
+func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {
+ h, err := address.StringToUint160(id.EncodeToString())
+ if err != nil {
+ return nil, err
+ }
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(balanceOfMethod)
invokePrm.SetArgs(h)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err)
} else if ln := len(prms); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln)
}
amount, err := client.BigIntFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
}
return amount, nil
}
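
`address.StringToUint160` is the neo-go helper the reverted `BalanceOf` relies on: it base58check-decodes a textual Neo address into its 160-bit script hash. A standalone sketch — the address literal is illustrative, and the error path covers it failing checksum validation:

```go
package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
)

func main() {
	// Illustrative N3-style address; a malformed or wrong-checksum string
	// is reported through the error, exactly as BalanceOf propagates it.
	h, err := address.StringToUint160("NNudMSGzEoktFzdYGYoNb3bzHzbmM1genF")
	if err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println("script hash:", h.StringLE())
}
```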
diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go
index f4685b0ab..4befbef45 100644
--- a/pkg/morph/client/balance/burn.go
+++ b/pkg/morph/client/balance/burn.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -32,12 +30,12 @@ func (b *BurnPrm) SetID(id []byte) {
}
// Burn destroys funds from the account.
-func (c *Client) Burn(ctx context.Context, p BurnPrm) error {
+func (c *Client) Burn(p BurnPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(burnMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go
index 1dacb9574..b05c526dc 100644
--- a/pkg/morph/client/balance/client.go
+++ b/pkg/morph/client/balance/client.go
@@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'balance' contract client: %w", err)
+ return nil, fmt.Errorf("could not create static client of Balance contract: %w", err)
}
return &Client{
@@ -54,7 +54,15 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ *o = append(*o, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
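
This hunk restores the opt-in notary pattern: `defaultOpts()` is empty again and callers must pass `TryNotary()` explicitly. The functional-options mechanics are easy to reproduce in isolation; the names below are illustrative:

```go
package main

import "fmt"

// Illustrative names; the real options carry client.StaticClientOption values.
type staticOption string

type opts []staticOption

type Option func(*opts)

func defaultOpts() *opts { return new(opts) } // empty again: notary is opt-in

func TryNotary() Option {
	return func(o *opts) { *o = append(*o, "try-notary") }
}

func AsAlphabet() Option {
	return func(o *opts) { *o = append(*o, "as-alphabet") }
}

func newFromMorph(options ...Option) opts {
	o := defaultOpts()
	for i := range options {
		options[i](o) // each option mutates the accumulated slice
	}
	return *o
}

func main() {
	fmt.Println(newFromMorph(TryNotary(), AsAlphabet())) // [try-notary as-alphabet]
}
```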
diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go
index 57e61d62b..39e4b28e5 100644
--- a/pkg/morph/client/balance/decimals.go
+++ b/pkg/morph/client/balance/decimals.go
@@ -1,7 +1,6 @@
package balance
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,20 +8,20 @@ import (
// Decimals receives the decimal precision of currency transactions
// through the Balance contract call, and returns it.
-func (c *Client) Decimals(ctx context.Context) (uint32, error) {
+func (c *Client) Decimals() (uint32, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(decimalsMethod)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err)
} else if ln := len(prms); ln != 1 {
return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln)
}
decimals, err := client.IntFromStackItem(prms[0])
if err != nil {
- return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
}
return uint32(decimals), nil
}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
index 83e8b0586..a5b206799 100644
--- a/pkg/morph/client/balance/lock.go
+++ b/pkg/morph/client/balance/lock.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -44,12 +42,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
}
// Lock locks fund on the user account.
-func (c *Client) Lock(ctx context.Context, p LockPrm) error {
+func (c *Client) Lock(p LockPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(lockMethod)
prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go
index 082ade85e..73448da31 100644
--- a/pkg/morph/client/balance/mint.go
+++ b/pkg/morph/client/balance/mint.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -32,12 +30,12 @@ func (m *MintPrm) SetID(id []byte) {
}
// Mint sends funds to the account.
-func (c *Client) Mint(ctx context.Context, p MintPrm) error {
+func (c *Client) Mint(p MintPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(mintMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go
index 870bed166..08fb05289 100644
--- a/pkg/morph/client/balance/transfer.go
+++ b/pkg/morph/client/balance/transfer.go
@@ -1,11 +1,11 @@
package balance
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// TransferPrm groups parameters of TransferX method.
@@ -21,18 +21,27 @@ type TransferPrm struct {
// TransferX transfers p.Amount of GASe-12 from p.From to p.To
// with details p.Details through direct smart contract call.
-func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
- from := p.From.ScriptHash()
- to := p.To.ScriptHash()
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) TransferX(p TransferPrm) error {
+ from, err := address.StringToUint160(p.From.EncodeToString())
+ if err != nil {
+ return err
+ }
+
+ to, err := address.StringToUint160(p.To.EncodeToString())
+ if err != nil {
+ return err
+ }
prm := client.InvokePrm{}
prm.SetMethod(transferXMethod)
prm.SetArgs(from, to, p.Amount, p.Details)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err = c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
+ return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
}
return nil
}
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index aab058d27..df521f56b 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -9,7 +9,6 @@ import (
"sync/atomic"
"time"
- nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
@@ -20,7 +19,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
@@ -61,9 +59,6 @@ type Client struct {
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
- nnsHash util.Uint160 // NNS contract hash
-
- nnsReader *nnsClient.ContractReader // NNS contract wrapper
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
@@ -98,12 +93,27 @@ type Client struct {
type cache struct {
m sync.RWMutex
+ nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
metrics metrics.MorphCacheMetrics
}
+func (c *cache) nns() *util.Uint160 {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.nnsHash
+}
+
+func (c *cache) setNNSHash(nnsHash util.Uint160) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.nnsHash = &nnsHash
+}
+
func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
defer c.m.RUnlock()
@@ -122,6 +132,7 @@ func (c *cache) invalidate() {
c.m.Lock()
defer c.m.Unlock()
+ c.nnsHash = nil
c.gKey = nil
c.txHeights.Purge()
}
@@ -151,10 +162,24 @@ func (e *notHaltStateError) Error() string {
)
}
+// implementation of error interface for FrostFS-specific errors.
+type frostfsError struct {
+ err error
+}
+
+func (e frostfsError) Error() string {
+ return fmt.Sprintf("frostfs error: %v", e.err)
+}
+
+// wraps FrostFS-specific error into frostfsError. Arg must not be nil.
+func wrapFrostFSError(err error) error {
+ return frostfsError{err}
+}
+
// Invoke invokes contract method by sending transaction into blockchain.
// Returns valid until block value.
// Supported args types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) {
+func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
start := time.Now()
success := false
defer func() {
@@ -165,29 +190,29 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return 0, ErrConnectionLost
}
txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
if err != nil {
- return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
+ return 0, fmt.Errorf("could not invoke %s: %w", method, err)
}
- c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
+ c.logger.Debug(logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
success = true
- return InvokeRes{Hash: txHash, VUB: vub}, nil
+ return vub, nil
}
// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
// If cb returns an error, the session is closed and this error is returned as-is.
-// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
+// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
// The default batchSize is 100, the default limit from neo-go.
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
+func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error {
start := time.Now()
success := false
defer func() {
@@ -214,7 +239,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
if err != nil {
return err
} else if val.State != HaltState {
- return ¬HaltStateError{state: val.State, exception: val.FaultException}
+ return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
}
arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
@@ -236,7 +261,10 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
}()
// Batch size for TraverseIterator() can be restricted on the server-side.
- traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
+ traverseBatchSize := batchSize
+ if invoker.DefaultIteratorResultItems < traverseBatchSize {
+ traverseBatchSize = invoker.DefaultIteratorResultItems
+ }
for {
items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
if err != nil {
@@ -278,7 +306,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (
}
if val.State != HaltState {
- return nil, ¬HaltStateError{state: val.State, exception: val.FaultException}
+ return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
}
success = true
@@ -299,7 +327,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
- c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
+ c.logger.Debug(logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -333,7 +361,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
- c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
+ c.logger.Debug(logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -360,8 +388,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
- zap.Error(err))
+ c.logger.Error(logs.ClientCantGetBlockchainHeight,
+ zap.String("error", err.Error()))
return nil
}
@@ -374,8 +402,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
- zap.Error(err))
+ c.logger.Error(logs.ClientCantGetBlockchainHeight243,
+ zap.String("error", err.Error()))
return nil
}
@@ -433,28 +461,6 @@ func (c *Client) TxHalt(h util.Uint256) (res bool, err error) {
return len(aer.Executions) > 0 && aer.Executions[0].VMState.HasFlag(vmstate.Halt), nil
}
-func (c *Client) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- if c.inactive {
- return nil, ErrConnectionLost
- }
-
- return c.client.GetApplicationLog(hash, trig)
-}
-
-func (c *Client) GetVersion() (*result.Version, error) {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- if c.inactive {
- return nil, ErrConnectionLost
- }
-
- return c.client.GetVersion()
-}
-
// TxHeight returns true if transaction has been successfully executed and persisted.
func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
c.switchLock.RLock()
@@ -470,7 +476,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
// NeoFSAlphabetList returns keys that are stored in NeoFS Alphabet role. Main chain
// stores alphabet node keys of inner ring there, however the sidechain stores both
// alphabet and non-alphabet node keys of inner ring.
-func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) {
+func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -480,7 +486,7 @@ func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err
list, err := c.roleList(noderoles.NeoFSAlphabet)
if err != nil {
- return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
+ return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
}
return list, nil
@@ -494,7 +500,7 @@ func (c *Client) GetDesignateHash() util.Uint160 {
func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
height, err := c.rpcActor.GetBlockCount()
if err != nil {
- return nil, fmt.Errorf("get chain height: %w", err)
+ return nil, fmt.Errorf("can't get chain height: %w", err)
}
return c.rolemgmt.GetDesignatedByRole(r, height)
@@ -565,7 +571,6 @@ func (c *Client) setActor(act *actor.Actor) {
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
- c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
}
func (c *Client) GetActor() *actor.Actor {
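
Among the changes above, the `traverseBatchSize` hunk replaces the `min()` builtin (available since Go 1.21) with an explicit comparison, consistent with the older-toolchain direction of this revert. The behavior is identical; a self-contained check, with `defaultIteratorResultItems` standing in for `invoker.DefaultIteratorResultItems`:

```go
package main

import "fmt"

// Stand-in for invoker.DefaultIteratorResultItems, the server-side default.
const defaultIteratorResultItems = 100

// traverseBatchSize reproduces the restored hunk: cap the requested batch
// at the server-side default without the Go 1.21+ min() builtin.
func traverseBatchSize(batchSize int) int {
	size := batchSize
	if defaultIteratorResultItems < size {
		size = defaultIteratorResultItems
	}
	return size
}

func main() {
	fmt.Println(traverseBatchSize(512)) // 100: capped by the default
	fmt.Println(traverseBatchSize(32))  // 32: already below the cap
}
```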
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index e4dcd0db7..78cb3e82f 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "net"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -42,13 +41,13 @@ type cfg struct {
endpoints []Endpoint
+ singleCli *rpcclient.WSClient // neo-go client for single client mode
+
inactiveModeCb Callback
switchInterval time.Duration
morphCacheMetrics metrics.MorphCacheMetrics
-
- dialerSource DialerSource
}
const (
@@ -61,14 +60,13 @@ var ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
func defaultConfig() *cfg {
return &cfg{
dialTimeout: defaultDialTimeout,
- logger: logger.NewLoggerWrapper(zap.L()),
+ logger: &logger.Logger{Logger: zap.L()},
metrics: morphmetrics.NoopRegister{},
waitInterval: defaultWaitInterval,
signer: &transaction.Signer{
Scopes: transaction.Global,
},
morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{},
- dialerSource: &noopDialerSource{},
}
}
@@ -126,30 +124,41 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
var err error
var act *actor.Actor
- var endpoint Endpoint
- for cli.endpoints.curr, endpoint = range cli.endpoints.list {
- cli.client, act, err = cli.newCli(ctx, endpoint)
+ if cfg.singleCli != nil {
+ // return client in single RPC node mode that uses
+ // predefined WS client
+ //
+ // in case of the closing web socket connection:
+ // if extra endpoints were provided via options,
+ // they will be used in switch process, otherwise
+ // inactive mode will be enabled
+ cli.client = cfg.singleCli
+
+ act, err = newActor(cfg.singleCli, acc, *cfg)
if err != nil {
- cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
- zap.Error(err), zap.String("endpoint", endpoint.Address))
- } else {
- cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint,
- zap.String("endpoint", endpoint.Address))
- if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
- cli.switchIsActive.Store(true)
- go cli.switchToMostPrioritized(ctx)
+ return nil, fmt.Errorf("could not create RPC actor: %w", err)
+ }
+ } else {
+ var endpoint Endpoint
+ for cli.endpoints.curr, endpoint = range cli.endpoints.list {
+ cli.client, act, err = cli.newCli(ctx, endpoint)
+ if err != nil {
+ cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
+ zap.Error(err), zap.String("endpoint", endpoint.Address))
+ } else {
+ cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint,
+ zap.String("endpoint", endpoint.Address))
+ if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
+ cli.switchIsActive.Store(true)
+ go cli.switchToMostPrioritized(ctx)
+ }
+ break
}
- break
+ }
+ if cli.client == nil {
+ return nil, ErrNoHealthyEndpoint
}
}
- if cli.client == nil {
- return nil, ErrNoHealthyEndpoint
- }
- cs, err := cli.client.GetContractStateByID(nnsContractID)
- if err != nil {
- return nil, fmt.Errorf("resolve nns hash: %w", err)
- }
- cli.nnsHash = cs.Hash
cli.setActor(act)
go cli.closeWaiter(ctx)
@@ -166,7 +175,6 @@ func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSCl
Options: rpcclient.Options{
DialTimeout: c.cfg.dialTimeout,
TLSClientConfig: cfg,
- NetDialContext: c.cfg.dialerSource.NetContextDialer(),
},
})
if err != nil {
@@ -277,6 +285,17 @@ func WithEndpoints(endpoints ...Endpoint) Option {
}
}
+// WithSingleClient returns a client constructor option
+// that specifies single neo-go client and forces Client
+// to use it for requests.
+//
+// Passed client must already be initialized.
+func WithSingleClient(cli *rpcclient.WSClient) Option {
+ return func(c *cfg) {
+ c.singleCli = cli
+ }
+}
+
// WithConnLostCallback returns a client constructor option
// that specifies a callback that is called when Client
// unsuccessfully tried to connect to all the specified
@@ -301,19 +320,3 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option {
c.morphCacheMetrics = morphCacheMetrics
}
}
-
-type DialerSource interface {
- NetContextDialer() func(context.Context, string, string) (net.Conn, error)
-}
-
-type noopDialerSource struct{}
-
-func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
- return nil
-}
-
-func WithDialerSource(ds DialerSource) Option {
- return func(c *cfg) {
- c.dialerSource = ds
- }
-}
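
A sketch of how the restored `WithSingleClient` option would be wired up, assuming the caller dials the WS endpoint first. `rpcclient.NewWS` is a real neo-go constructor, though the exact options type has varied across neo-go releases (`Options` vs `WSOptions`); the endpoint URL and the commented `client.New` call are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
)

func main() {
	ctx := context.Background()

	// Dial one WS endpoint up front; the URL is illustrative.
	ws, err := rpcclient.NewWS(ctx, "ws://localhost:30333/ws", rpcclient.WSOptions{})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer ws.Close()

	// The pre-dialed client is then forced on the morph client, e.g.:
	//
	//	cli, err := client.New(ctx, key, client.WithSingleClient(ws))
	//
	// Per the option's doc comment, if the socket later closes, extra
	// endpoints (when configured) drive the switch; otherwise the client
	// goes inactive.
	fmt.Println("connected")
}
```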
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index be684619b..9dd3a337b 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -27,8 +27,17 @@ const (
getMethod = "get"
listMethod = "list"
containersOfMethod = "containersOf"
+ eaclMethod = "eACL"
+ setEACLMethod = "setEACL"
deletionInfoMethod = "deletionInfo"
+ startEstimationMethod = "startContainerEstimation"
+ stopEstimationMethod = "stopContainerEstimation"
+
+ putSizeMethod = "putContainerSize"
+ listSizesMethod = "listContainerSizes"
+ getSizeMethod = "getContainerSize"
+
// putNamedMethod is method name for container put with an alias. It is exported to provide custom fee.
putNamedMethod = "putNamed"
)
@@ -46,9 +55,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
opts[i](o)
}
- sc, err := client.NewStatic(cli, contract, fee, *o...)
+ sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...)
if err != nil {
- return nil, fmt.Errorf("create 'container' contract client: %w", err)
+ return nil, fmt.Errorf("can't create container static client: %w", err)
}
return &Client{client: sc}, nil
@@ -68,10 +77,20 @@ func (c Client) ContractAddress() util.Uint160 {
// parameter of Wrapper.
type Option func(*opts)
-type opts []client.StaticClientOption
+type opts struct {
+ staticOpts []client.StaticClientOption
+}
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ o.staticOpts = append(o.staticOpts, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
@@ -81,6 +100,6 @@ func defaultOpts() *opts {
// Considered to be used by IR nodes only.
func AsAlphabet() Option {
return func(o *opts) {
- *o = append(*o, client.AsAlphabet())
+ o.staticOpts = append(o.staticOpts, client.AsAlphabet())
}
}
diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go
index 60fb8ad7c..c4db0fe6e 100644
--- a/pkg/morph/client/container/containers_of.go
+++ b/pkg/morph/client/container/containers_of.go
@@ -1,9 +1,10 @@
package container
import (
- "context"
"errors"
+ "fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -14,37 +15,28 @@ import (
// to the specified user of FrostFS system. If idUser is nil, returns the list of all containers.
//
// If remote RPC does not support neo-go session API, fallback to List() method.
-func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) {
- var cidList []cid.ID
- var err error
-
- cb := func(id cid.ID) error {
- cidList = append(cidList, id)
- return nil
- }
- if err = c.IterateContainersOf(ctx, idUser, cb); err != nil {
- return nil, err
- }
- return cidList, nil
-}
-
-// iterateContainers iterates over a list of container identifiers
-// belonging to the specified user of FrostFS system and executes
-// `cb` on each element. If idUser is nil, calls it on the list of all containers.
-func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error {
+func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
var rawID []byte
+
if idUser != nil {
rawID = idUser.WalletBytes()
}
- itemCb := func(item stackitem.Item) error {
- id, err := getCIDfromStackItem(item)
+ var cidList []cid.ID
+ cb := func(item stackitem.Item) error {
+ rawID, err := client.BytesFromStackItem(item)
if err != nil {
- return err
+ return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err)
}
- if err = cb(id); err != nil {
- return err
+
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return fmt.Errorf("decode container ID: %w", err)
}
+
+ cidList = append(cidList, id)
return nil
}
@@ -58,10 +50,13 @@ func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb fu
const batchSize = 512
cnrHash := c.client.ContractAddress()
- err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
- if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
- return c.iterate(ctx, idUser, cb)
+ err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID)
+ if err != nil {
+ if errors.Is(err, unwrap.ErrNoSessionID) {
+ return c.list(idUser)
+ }
+ return nil, err
}
- return err
+ return cidList, nil
}
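
The control flow restored here is: try the session-based iterator first, and only when the node reports `unwrap.ErrNoSessionID` fall back to the plain `list` call. A self-contained model of that fallback, with `errNoSessionID` standing in for the real sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// errNoSessionID stands in for unwrap.ErrNoSessionID.
var errNoSessionID = errors.New("no session id")

// containersOf simulates TestInvokeIterator: it fails with the sentinel
// when the remote node has no session support.
func containersOf(sessions bool) ([]string, error) {
	if !sessions {
		return nil, errNoSessionID
	}
	return []string{"cnr-1", "cnr-2"}, nil
}

// list simulates the plain `list` contract method used as the fallback.
func list() ([]string, error) { return []string{"cnr-1"}, nil }

func main() {
	res, err := containersOf(false)
	if errors.Is(err, errNoSessionID) {
		res, err = list() // fallback restored by the diff
	}
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(res) // [cnr-1]
}
```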
diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go
index 09912efa5..20351b570 100644
--- a/pkg/morph/client/container/delete.go
+++ b/pkg/morph/client/container/delete.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
@@ -13,7 +12,7 @@ import (
// along with signature and session token.
//
// Returns error if container ID is nil.
-func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
+func Delete(c *Client, witness core.RemovalWitness) error {
binCnr := make([]byte, sha256.Size)
witness.ContainerID.Encode(binCnr)
@@ -27,7 +26,7 @@ func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
prm.SetToken(tok.Marshal())
}
- _, err := c.Delete(ctx, prm)
+ _, err := c.Delete(prm)
return err
}
@@ -66,7 +65,9 @@ func (d *DeletePrm) SetKey(key []byte) {
//
// Returns valid until block and any error encountered that caused
// the removal to interrupt.
-func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) Delete(p DeletePrm) (uint32, error) {
if len(p.signature) == 0 && !p.IsControl() {
return 0, errNilArgument
}
@@ -76,9 +77,9 @@ func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
prm.SetArgs(p.cnr, p.signature, p.key, p.token)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(ctx, prm)
+ res, err := c.client.Invoke(prm)
if err != nil {
- return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
+ return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
}
return res.VUB, nil
}
diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go
index 90bcdd7d5..dda6bf98c 100644
--- a/pkg/morph/client/container/deletion_info.go
+++ b/pkg/morph/client/container/deletion_info.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
"strings"
@@ -15,39 +14,39 @@ import (
"github.com/mr-tron/base58"
)
-func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) {
- return DeletionInfo(ctx, (*Client)(x), cnr)
+func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) {
+ return DeletionInfo((*Client)(x), cnr)
}
type deletionInfo interface {
- DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error)
+ DeletionInfo(cid []byte) (*containercore.DelInfo, error)
}
-func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
+func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.DeletionInfo(ctx, binCnr)
+ return c.DeletionInfo(binCnr)
}
-func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) {
+func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(deletionInfoMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err)
}
if len(arr) != 2 {
@@ -56,17 +55,17 @@ func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.D
rawOwner, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err)
}
var owner user.ID
if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil {
- return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not decode container owner id (%s): %w", deletionInfoMethod, err)
}
epoch, err := client.BigIntFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err)
}
return &containercore.DelInfo{
diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go
new file mode 100644
index 000000000..8e9455050
--- /dev/null
+++ b/pkg/morph/client/container/eacl.go
@@ -0,0 +1,95 @@
+package container
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+)
+
+// GetEACL reads the extended ACL table from FrostFS system
+// through Container contract call.
+//
+// Returns apistatus.EACLNotFound if eACL table is missing in the contract.
+func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) {
+ binCnr := make([]byte, sha256.Size)
+ cnr.Encode(binCnr)
+
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(eaclMethod)
+ prm.SetArgs(binCnr)
+
+ prms, err := c.client.TestInvoke(prm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err)
+ } else if ln := len(prms); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln)
+ }
+
+ arr, err := client.ArrayFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err)
+ }
+
+ if len(arr) != 4 {
+ return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr))
+ }
+
+ rawEACL, err := client.BytesFromStackItem(arr[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err)
+ }
+
+ sig, err := client.BytesFromStackItem(arr[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err)
+ }
+
+ // Client may not return errors if the table is missing, so check this case additionally.
+ // The absence of a signature in the response can be taken as an eACL absence criterion,
+ // since unsigned table cannot be approved in the storage by design.
+ if len(sig) == 0 {
+ return nil, new(apistatus.EACLNotFound)
+ }
+
+ pub, err := client.BytesFromStackItem(arr[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err)
+ }
+
+ binToken, err := client.BytesFromStackItem(arr[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err)
+ }
+
+ var res container.EACL
+
+ res.Value = eacl.NewTable()
+ if err = res.Value.Unmarshal(rawEACL); err != nil {
+ return nil, err
+ }
+
+ if len(binToken) > 0 {
+ res.Session = new(session.Container)
+
+ err = res.Session.Unmarshal(binToken)
+ if err != nil {
+ return nil, fmt.Errorf("could not unmarshal session token: %w", err)
+ }
+ }
+
+ // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
+ var sigV2 refs.Signature
+ sigV2.SetKey(pub)
+ sigV2.SetSign(sig)
+ sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256)
+
+ err = res.Signature.ReadFromV2(sigV2)
+ return &res, err
+}
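
The subtle rule in `GetEACL` above is the absence check: the contract may return a zero-value table rather than an error, so an empty signature is taken to mean "no eACL", since an unsigned table could never have been approved into storage. The decision in isolation, with `errEACLNotFound` standing in for `apistatus.EACLNotFound`:

```go
package main

import (
	"errors"
	"fmt"
)

// errEACLNotFound stands in for apistatus.EACLNotFound.
var errEACLNotFound = errors.New("eACL not found")

// eaclFromStack reproduces only the decision rule: an unsigned table is
// treated as an absent table.
func eaclFromStack(rawEACL, sig []byte) ([]byte, error) {
	if len(sig) == 0 {
		return nil, errEACLNotFound
	}
	return rawEACL, nil
}

func main() {
	if _, err := eaclFromStack([]byte{0x0a}, nil); errors.Is(err, errEACLNotFound) {
		fmt.Println("unsigned table reported as missing")
	}
}
```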
diff --git a/pkg/morph/client/container/estimations.go b/pkg/morph/client/container/estimations.go
new file mode 100644
index 000000000..f288c63cf
--- /dev/null
+++ b/pkg/morph/client/container/estimations.go
@@ -0,0 +1,54 @@
+package container
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+)
+
+// StartEstimationPrm groups parameters of StartEstimation operation.
+type StartEstimationPrm struct {
+ commonEstimationPrm
+}
+
+// StopEstimationPrm groups parameters of StopEstimation operation.
+type StopEstimationPrm struct {
+ commonEstimationPrm
+}
+
+type commonEstimationPrm struct {
+ epoch uint64
+
+ client.InvokePrmOptional
+}
+
+// SetEpoch sets epoch.
+func (p *commonEstimationPrm) SetEpoch(epoch uint64) {
+ p.epoch = epoch
+}
+
+// StartEstimation votes to produce start estimation notification.
+func (c *Client) StartEstimation(p StartEstimationPrm) error {
+ prm := client.InvokePrm{}
+ prm.SetMethod(startEstimationMethod)
+ prm.SetArgs(p.epoch)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ if _, err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", startEstimationMethod, err)
+ }
+ return nil
+}
+
+// StopEstimation votes to produce stop estimation notification.
+func (c *Client) StopEstimation(p StopEstimationPrm) error {
+ prm := client.InvokePrm{}
+ prm.SetMethod(stopEstimationMethod)
+ prm.SetArgs(p.epoch)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ if _, err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", stopEstimationMethod, err)
+ }
+ return nil
+}
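
Both parameter types above share `SetEpoch` by embedding `commonEstimationPrm`: Go promotes the embedded method onto `StartEstimationPrm` and `StopEstimationPrm`, so each public method keeps its own named parameter struct without duplication. A stripped-down sketch:

```go
package main

import "fmt"

// commonEstimationPrm carries the shared field; both public parameter
// types embed it, so SetEpoch is promoted onto each of them.
type commonEstimationPrm struct{ epoch uint64 }

func (p *commonEstimationPrm) SetEpoch(epoch uint64) { p.epoch = epoch }

type StartEstimationPrm struct{ commonEstimationPrm }

type StopEstimationPrm struct{ commonEstimationPrm }

func main() {
	var start StartEstimationPrm
	start.SetEpoch(42) // promoted from the embedded struct

	var stop StopEstimationPrm
	stop.SetEpoch(43)

	fmt.Println(start.epoch, stop.epoch) // 42 43
}
```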
diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go
index 8622d2cdd..6715f870f 100644
--- a/pkg/morph/client/container/get.go
+++ b/pkg/morph/client/container/get.go
@@ -1,15 +1,14 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -17,8 +16,8 @@ import (
type containerSource Client
-func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) {
- return Get(ctx, (*Client)(x), cnr)
+func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) {
+ return Get((*Client)(x), cnr)
}
// AsContainerSource provides container Source interface
@@ -28,15 +27,15 @@ func AsContainerSource(w *Client) containercore.Source {
}
type getContainer interface {
- Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+ Get(cid []byte) (*containercore.Container, error)
}
// Get marshals container ID, and passes it to Wrapper's Get method.
-func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) {
+func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.Get(ctx, binCnr)
+ return c.Get(binCnr)
}
// Get reads the container from FrostFS system by binary identifier
@@ -44,24 +43,24 @@ func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Contai
//
// If an empty slice is returned for the requested identifier,
// storage.ErrNotFound error is returned.
-func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
+func (c *Client) Get(cid []byte) (*containercore.Container, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(getMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err)
}
if len(arr) != 4 {
@@ -70,29 +69,29 @@ func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container,
cnrBytes, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err)
}
sigBytes, err := client.BytesFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err)
}
pub, err := client.BytesFromStackItem(arr[2])
if err != nil {
- return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err)
}
tokBytes, err := client.BytesFromStackItem(arr[3])
if err != nil {
- return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err)
}
var cnr containercore.Container
if err := cnr.Value.Unmarshal(cnrBytes); err != nil {
// use other major version if there is any
- return nil, fmt.Errorf("unmarshal container: %w", err)
+ return nil, fmt.Errorf("can't unmarshal container: %w", err)
}
if len(tokBytes) > 0 {
@@ -100,7 +99,7 @@ func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container,
err = cnr.Session.Unmarshal(tokBytes)
if err != nil {
- return nil, fmt.Errorf("unmarshal session token: %w", err)
+ return nil, fmt.Errorf("could not unmarshal session token: %w", err)
}
}
diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go
index fc63d1beb..6fed46c1a 100644
--- a/pkg/morph/client/container/list.go
+++ b/pkg/morph/client/container/list.go
@@ -1,22 +1,20 @@
package container
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
-// iterate iterates through a list of container identifiers belonging
+// list returns a list of container identifiers belonging
// to the specified user of FrostFS system. The list is composed
// through Container contract call.
//
-// Iterates through the identifiers of all FrostFS containers if pointer
+// Returns the identifiers of all FrostFS containers if pointer
// to user identifier is nil.
-func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error {
+func (c *Client) list(idUser *user.ID) ([]cid.ID, error) {
var rawID []byte
if idUser != nil {
@@ -27,43 +25,34 @@ func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) e
prm.SetMethod(listMethod)
prm.SetArgs(rawID)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return fmt.Errorf("test invoke (%s): %w", listMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err)
} else if ln := len(res); ln != 1 {
- return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
}
res, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err)
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err)
}
+ cidList := make([]cid.ID, 0, len(res))
for i := range res {
- id, err := getCIDfromStackItem(res[i])
+ rawID, err := client.BytesFromStackItem(res[i])
if err != nil {
- return err
+ return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err)
}
- if err = cb(id); err != nil {
- return err
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
}
+
+ cidList = append(cidList, id)
}
- return nil
-}
-
-func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) {
- rawID, err := client.BytesFromStackItem(item)
- if err != nil {
- return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err)
- }
-
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
- }
- return id, nil
+ return cidList, nil
}
diff --git a/pkg/morph/client/container/load.go b/pkg/morph/client/container/load.go
new file mode 100644
index 000000000..5e2c3c2c3
--- /dev/null
+++ b/pkg/morph/client/container/load.go
@@ -0,0 +1,131 @@
+package container
+
+import (
+ "fmt"
+
+ v2refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+// EstimationID is an identity of container load estimation inside Container contract.
+type EstimationID []byte
+
+// ListLoadEstimationsByEpoch returns a list of container load estimations for the specified epoch.
+// The list is composed through Container contract call.
+func (c *Client) ListLoadEstimationsByEpoch(epoch uint64) ([]EstimationID, error) {
+ invokePrm := client.TestInvokePrm{}
+ invokePrm.SetMethod(listSizesMethod)
+ invokePrm.SetArgs(epoch)
+
+ prms, err := c.client.TestInvoke(invokePrm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", listSizesMethod, err)
+ } else if ln := len(prms); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", listSizesMethod, ln)
+ }
+
+ prms, err = client.ArrayFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listSizesMethod, err)
+ }
+
+ res := make([]EstimationID, 0, len(prms))
+ for i := range prms {
+ id, err := client.BytesFromStackItem(prms[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not get ID byte array from stack item (%s): %w", listSizesMethod, err)
+ }
+
+ res = append(res, id)
+ }
+
+ return res, nil
+}
+
+// Estimation is a structure of a single container load estimation
+// reported by a storage node.
+type Estimation struct {
+ Size uint64
+
+ Reporter []byte
+}
+
+// Estimations is a structure of grouped container load estimation inside Container contract.
+type Estimations struct {
+ ContainerID cid.ID
+
+ Values []Estimation
+}
+
+// GetUsedSpaceEstimations returns a list of container load estimations by ID.
+// The list is composed through Container contract call.
+func (c *Client) GetUsedSpaceEstimations(id EstimationID) (*Estimations, error) {
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(getSizeMethod)
+ prm.SetArgs([]byte(id))
+
+ prms, err := c.client.TestInvoke(prm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", getSizeMethod, err)
+ } else if ln := len(prms); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", getSizeMethod, ln)
+ }
+
+ prms, err = client.ArrayFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get stack items of estimation fields from stack item (%s): %w", getSizeMethod, err)
+ } else if ln := len(prms); ln != 2 {
+ return nil, fmt.Errorf("unexpected stack item count of estimations fields (%s)", getSizeMethod)
+ }
+
+ rawCnr, err := client.BytesFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get container ID byte array from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ prms, err = client.ArrayFromStackItem(prms[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get estimation list array from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ var cnr cid.ID
+
+ err = cnr.Decode(rawCnr)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ v2 := new(v2refs.ContainerID)
+ v2.SetValue(rawCnr)
+ res := &Estimations{
+ ContainerID: cnr,
+ Values: make([]Estimation, 0, len(prms)),
+ }
+
+ for i := range prms {
+ arr, err := client.ArrayFromStackItem(prms[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not get estimation struct from stack item (%s): %w", getSizeMethod, err)
+ } else if ln := len(arr); ln != 2 {
+ return nil, fmt.Errorf("unexpected stack item count of estimation fields (%s)", getSizeMethod)
+ }
+
+ reporter, err := client.BytesFromStackItem(arr[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get reporter byte array from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ sz, err := client.IntFromStackItem(arr[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get estimation size from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ res.Values = append(res.Values, Estimation{
+ Reporter: reporter,
+ Size: uint64(sz),
+ })
+ }
+
+ return res, nil
+}
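
Usage of this new file is a two-step flow: list the estimation IDs recorded for an epoch, then resolve each ID into its grouped per-reporter values. An in-memory model of that flow — the types and sample data are illustrative, not the contract's encoding:

```go
package main

import "fmt"

// estimation mirrors the Estimation struct above in spirit only.
type estimation struct {
	Size     uint64
	Reporter string
}

// In-memory stand-ins for the two contract calls.
var byEpoch = map[uint64][]string{13: {"id-1", "id-2"}}

var byID = map[string][]estimation{
	"id-1": {{Size: 100, Reporter: "node-a"}, {Size: 96, Reporter: "node-b"}},
	"id-2": {{Size: 7, Reporter: "node-a"}},
}

func main() {
	for _, id := range byEpoch[13] { // ListLoadEstimationsByEpoch(13)
		for _, e := range byID[id] { // GetUsedSpaceEstimations(id)
			fmt.Printf("%s: %d bytes reported by %s\n", id, e.Size, e.Reporter)
		}
	}
}
```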
diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go
index 3bb84eb87..ee323af00 100644
--- a/pkg/morph/client/container/put.go
+++ b/pkg/morph/client/container/put.go
@@ -1,12 +1,11 @@
package container
import (
- "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
@@ -15,7 +14,7 @@ import (
// along with sig.Key() and sig.Sign().
//
// Returns error if container is nil.
-func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) {
+func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
data := cnr.Value.Marshal()
d := container.ReadDomain(cnr.Value)
@@ -36,7 +35,7 @@ func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID,
prm.SetKey(sigV2.GetKey())
prm.SetSignature(sigV2.GetSign())
- err := c.Put(ctx, prm)
+ err := c.Put(prm)
if err != nil {
return nil, err
}
@@ -94,7 +93,9 @@ func (p *PutPrm) SetZone(zone string) {
//
// Returns calculated container identifier and any error
// encountered that caused the saving to interrupt.
-func (c *Client) Put(ctx context.Context, p PutPrm) error {
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) Put(p PutPrm) error {
if len(p.sig) == 0 || len(p.key) == 0 {
return errNilArgument
}
@@ -115,9 +116,9 @@ func (c *Client) Put(ctx context.Context, p PutPrm) error {
prm.SetMethod(method)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", method, err)
+ return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
return nil
}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
index d3eba7639..016b56f8f 100644
--- a/pkg/morph/client/frostfs/cheque.go
+++ b/pkg/morph/client/frostfs/cheque.go
@@ -1,8 +1,6 @@
package frostfscontract
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -39,13 +37,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) {
}
// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
+func (x *Client) Cheque(p ChequePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(chequeMethod)
prm.SetArgs(p.id, p.user, p.amount, p.lock)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(ctx, prm)
+ _, err := x.client.Invoke(prm)
return err
}
@@ -68,12 +66,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
}
// AlphabetUpdate update list of alphabet nodes.
-func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error {
+func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(alphabetUpdateMethod)
prm.SetArgs(p.id, p.pubs)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(ctx, prm)
+ _, err := x.client.Invoke(prm)
return err
}
diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go
index cd6a9849e..571915c27 100644
--- a/pkg/morph/client/frostfs/client.go
+++ b/pkg/morph/client/frostfs/client.go
@@ -35,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'frostfs' contract client: %w", err)
+ return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go
index 61eb03f09..4c31f42de 100644
--- a/pkg/morph/client/frostfsid/client.go
+++ b/pkg/morph/client/frostfsid/client.go
@@ -27,7 +27,7 @@ var _ frostfsidcore.SubjectProvider = (*Client)(nil)
func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) {
sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet())
if err != nil {
- return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err)
+ return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go
index 3a789672a..0852f536c 100644
--- a/pkg/morph/client/frostfsid/subject.go
+++ b/pkg/morph/client/frostfsid/subject.go
@@ -1,7 +1,6 @@
package frostfsid
import (
- "context"
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
@@ -15,14 +14,14 @@ const (
methodGetSubjectExtended = "getSubjectExtended"
)
-func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
+func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubject)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubject, err)
}
structArr, err := checkStackItem(res)
@@ -32,20 +31,20 @@ func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidc
subj, err := frostfsidclient.ParseSubject(structArr)
if err != nil {
- return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
}
-func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
+func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(methodGetSubjectExtended)
prm.SetArgs(addr)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubjectExtended, err)
}
structArr, err := checkStackItem(res)
@@ -55,7 +54,7 @@ func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*fr
subj, err := frostfsidclient.ParseSubjectExtended(structArr)
if err != nil {
- return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err)
}
return subj, nil
@@ -68,7 +67,7 @@ func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error
structArr, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", methodGetSubject, err)
}
return
}
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index b9e39c25e..10ed21582 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -2,7 +2,6 @@ package client
import (
"context"
- "slices"
"sort"
"time"
@@ -43,7 +42,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
newEndpoint := c.endpoints.list[c.endpoints.curr]
cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
- c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
+ c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
zap.String("endpoint", newEndpoint.Address),
zap.Error(err),
)
@@ -53,7 +52,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
c.cache.invalidate()
- c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
+ c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
zap.String("endpoint", newEndpoint.Address))
c.client = cli
@@ -100,7 +99,8 @@ mainLoop:
case <-t.C:
c.switchLock.RLock()
- endpointsCopy := slices.Clone(c.endpoints.list)
+ endpointsCopy := make([]Endpoint, len(c.endpoints.list))
+ copy(endpointsCopy, c.endpoints.list)
currPriority := c.endpoints.list[c.endpoints.curr].Priority
highestPriority := c.endpoints.list[0].Priority
@@ -122,7 +122,7 @@ mainLoop:
cli, act, err := c.newCli(ctx, e)
if err != nil {
- c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
+ c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
@@ -147,7 +147,7 @@ mainLoop:
c.switchLock.Unlock()
- c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC,
+ c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC,
zap.String("endpoint", tryE))
return
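
The explicit make-and-copy pair is a behavior-preserving replacement for the removed slices.Clone call; a generic sketch of the equivalence:

    package example

    // clone returns an independent shallow copy of src: the result shares
    // no backing array with the input, which is exactly what slices.Clone
    // does on Go 1.21 and newer.
    func clone[T any](src []T) []T {
        dst := make([]T, len(src))
        copy(dst, src)
        return dst
    }
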
diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go
index de8afbfb5..eafa097e9 100644
--- a/pkg/morph/client/netmap/client.go
+++ b/pkg/morph/client/netmap/client.go
@@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'netmap' contract client: %w", err)
+ return nil, fmt.Errorf("can't create netmap static client: %w", err)
}
return &Client{client: sc}, nil
@@ -65,7 +65,15 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns an option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ *o = append(*o, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
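
With TryNotary no longer part of defaultOpts, callers that relied on the old default must opt in explicitly; a construction sketch in which morphCli (*client.Client), contractHash (util.Uint160) and fee (fixedn.Fixed8) are assumed to be prepared elsewhere:

    // Notary tries must now be requested explicitly at construction time.
    nmClient, err := netmap.NewFromMorph(morphCli, contractHash, fee, netmap.TryNotary())
    if err != nil {
        return nil, fmt.Errorf("create netmap contract client: %w", err)
    }
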
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 3f6aed506..2d19a8193 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -1,7 +1,7 @@
package netmap
import (
- "context"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -24,45 +24,75 @@ const (
// MaxObjectSize receives max object size configuration
// value through the Netmap contract call.
-func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, MaxObjectSizeConfig)
+func (c *Client) MaxObjectSize() (uint64, error) {
+ objectSize, err := c.readUInt64Config(MaxObjectSizeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err)
+ }
+
+ return objectSize, nil
}
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
-func (c *Client) EpochDuration(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, EpochDurationConfig)
+func (c *Client) EpochDuration() (uint64, error) {
+ epochDuration, err := c.readUInt64Config(EpochDurationConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err)
+ }
+
+ return epochDuration, nil
}
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
-func (c *Client) ContainerFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, ContainerFeeConfig)
+func (c *Client) ContainerFee() (uint64, error) {
+ fee, err := c.readUInt64Config(ContainerFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err)
+ }
+
+ return fee, nil
}
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
-func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, ContainerAliasFeeConfig)
+func (c *Client) ContainerAliasFee() (uint64, error) {
+ fee, err := c.readUInt64Config(ContainerAliasFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err)
+ }
+
+ return fee, nil
}
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
// settings.
//
// Returns (false, nil) if config key is not found in the contract.
-func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) {
- return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey)
+func (c *Client) HomomorphicHashDisabled() (bool, error) {
+ return c.readBoolConfig(HomomorphicHashingDisabledKey)
}
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
-func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, IrCandidateFeeConfig)
+func (c *Client) InnerRingCandidateFee() (uint64, error) {
+ fee, err := c.readUInt64Config(IrCandidateFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err)
+ }
+
+ return fee, nil
}
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
-func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, WithdrawFeeConfig)
+func (c *Client) WithdrawFee() (uint64, error) {
+ fee, err := c.readUInt64Config(WithdrawFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err)
+ }
+
+ return fee, nil
}
// MaintenanceModeAllowed reads admission of "maintenance" state from the
@@ -70,32 +100,34 @@ func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
// that storage nodes are allowed to switch their state to "maintenance".
//
// By default, maintenance state is disallowed.
-func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) {
- return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig)
+func (c *Client) MaintenanceModeAllowed() (bool, error) {
+ return c.readBoolConfig(MaintenanceModeAllowedConfig)
}
-func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) {
- v, err := c.config(ctx, []byte(key))
- if err != nil {
- return 0, fmt.Errorf("read netconfig value '%s': %w", key, err)
- }
-
- bi, err := v.TryInteger()
+func (c *Client) readUInt64Config(key string) (uint64, error) {
+ v, err := c.config([]byte(key), IntegerAssert)
if err != nil {
return 0, err
}
- return bi.Uint64(), nil
+
+ // IntegerAssert is guaranteed to return int64 if the error is nil.
+ return uint64(v.(int64)), nil
}
// reads boolean value by the given key from the FrostFS network configuration
// stored in the Sidechain. Returns false if key is not present.
-func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) {
- v, err := c.config(ctx, []byte(key))
+func (c *Client) readBoolConfig(key string) (bool, error) {
+ v, err := c.config([]byte(key), BoolAssert)
if err != nil {
- return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
+ if errors.Is(err, ErrConfigNotFound) {
+ return false, nil
+ }
+
+ return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err)
}
- return v.TryBool()
+ // BoolAssert is guaranteed to return bool if the error is nil.
+ return v.(bool), nil
}
// SetConfigPrm groups parameters of SetConfig operation.
@@ -123,13 +155,13 @@ func (s *SetConfigPrm) SetValue(value any) {
}
// SetConfig sets config field.
-func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error {
+func (c *Client) SetConfig(p SetConfigPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(setConfigMethod)
prm.SetArgs(p.id, p.key, p.value)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
@@ -166,14 +198,14 @@ type NetworkConfiguration struct {
}
// ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain.
-func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) {
+func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
var res NetworkConfiguration
prm := client.TestInvokePrm{}
prm.SetMethod(configListMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return res, fmt.Errorf("test invoke (%s): %w",
+ return res, fmt.Errorf("could not perform test invocation (%s): %w",
configListMethod, err)
}
@@ -244,18 +276,22 @@ func bytesToBool(val []byte) bool {
return false
}
+// ErrConfigNotFound is returned when the requested key was not found
+// in the network config (returned value is `Null`).
+var ErrConfigNotFound = errors.New("config value not found")
+
// config performs the test invoke of get config value
// method of FrostFS Netmap contract.
//
// Returns ErrConfigNotFound if config key is not found in the contract.
-func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) {
+func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(configMethod)
prm.SetArgs(key)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
configMethod, err)
}
@@ -264,7 +300,26 @@ func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error)
configMethod, ln)
}
- return items[0], nil
+ if _, ok := items[0].(stackitem.Null); ok {
+ return nil, ErrConfigNotFound
+ }
+
+ return assert(items[0])
+}
+
+// IntegerAssert converts stack item to int64.
+func IntegerAssert(item stackitem.Item) (any, error) {
+ return client.IntFromStackItem(item)
+}
+
+// StringAssert converts stack item to string.
+func StringAssert(item stackitem.Item) (any, error) {
+ return client.StringFromStackItem(item)
+}
+
+// BoolAssert converts stack item to bool.
+func BoolAssert(item stackitem.Item) (any, error) {
+ return client.BoolFromStackItem(item)
}
// iterateRecords iterates over all config records and passes them to f.
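
The config getter now routes every read through an assert callback plus a type assertion on the returned any value. A sketch of how a hypothetical string-valued key could be read with the same shape; readStringConfig does not exist in this patch, it only mirrors readUInt64Config and readBoolConfig above:

    // readStringConfig is a hypothetical reader following the same pattern:
    // pass StringAssert into config and type-assert the result.
    func (c *Client) readStringConfig(key string) (string, error) {
        v, err := c.config([]byte(key), StringAssert)
        if err != nil {
            return "", err
        }

        // StringAssert is guaranteed to return string if the error is nil.
        return v.(string), nil
    }
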
diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go
index 8561329ec..92d569ae2 100644
--- a/pkg/morph/client/netmap/epoch.go
+++ b/pkg/morph/client/netmap/epoch.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,13 +8,13 @@ import (
// Epoch receives number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) Epoch(ctx context.Context) (uint64, error) {
+func (c *Client) Epoch() (uint64, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(epochMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w",
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w",
epochMethod, err)
}
@@ -26,20 +25,20 @@ func (c *Client) Epoch(ctx context.Context) (uint64, error) {
num, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err)
+ return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err)
}
return uint64(num), nil
}
// LastEpochBlock receives block number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
+func (c *Client) LastEpochBlock() (uint32, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(lastEpochBlockMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w",
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w",
lastEpochBlockMethod, err)
}
@@ -50,7 +49,7 @@ func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
block, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("get number from stack item (%s): %w",
+ return 0, fmt.Errorf("could not get number from stack item (%s): %w",
lastEpochBlockMethod, err)
}
return uint32(block), nil
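
A combined usage sketch for the two contextless getters, wrapped in a hypothetical helper:

    package example

    import (
        "log"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
    )

    // logEpochStart reports when the current epoch began; nmClient is
    // assumed to be an initialized *netmap.Client.
    func logEpochStart(nmClient *netmap.Client) error {
        epoch, err := nmClient.Epoch()
        if err != nil {
            return err
        }
        startBlock, err := nmClient.LastEpochBlock()
        if err != nil {
            return err
        }
        log.Printf("epoch %d began at sidechain block %d", epoch, startBlock)
        return nil
    }
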
diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go
index 0e1f9186b..d6f8c56b2 100644
--- a/pkg/morph/client/netmap/innerring.go
+++ b/pkg/morph/client/netmap/innerring.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"crypto/elliptic"
"fmt"
@@ -24,7 +23,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) {
}
// UpdateInnerRing updates inner ring keys.
-func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
+func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
args := make([][]byte, len(p.keys))
for i := range args {
args[i] = p.keys[i].Bytes()
@@ -35,18 +34,18 @@ func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
prm.SetArgs(args)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
return err
}
// GetInnerRingList return current IR list.
-func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) {
+func (c *Client) GetInnerRingList() (keys.PublicKeys, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(innerRingListMethod)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err)
}
return irKeysFromStackItem(prms, innerRingListMethod)
@@ -59,7 +58,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys
irs, err := client.ArrayFromStackItem(stack[0])
if err != nil {
- return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err)
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
}
irKeys := make(keys.PublicKeys, len(irs))
@@ -79,7 +78,7 @@ const irNodeFixedPrmNumber = 1
func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
prms, err := client.ArrayFromStackItem(prm)
if err != nil {
- return nil, fmt.Errorf("get stack item array (IRNode): %w", err)
+ return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err)
} else if ln := len(prms); ln != irNodeFixedPrmNumber {
return nil, fmt.Errorf(
"unexpected stack item count (IRNode): expected %d, has %d",
@@ -90,7 +89,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
byteKey, err := client.BytesFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err)
+ return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err)
}
return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256())
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index 97782fc25..61bbf5f17 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
@@ -12,14 +11,14 @@ import (
// GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and
// decodes netmap.NetMap from the response.
-func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(epochSnapshotMethod)
invokePrm.SetArgs(epoch)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
epochSnapshotMethod, err)
}
@@ -35,13 +34,13 @@ func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.Ne
// GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo
// from the response.
-func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
+func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapCandidatesMethod)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err)
}
if len(res) > 0 {
@@ -52,13 +51,13 @@ func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
}
// NetMap calls "netmap" method and decode netmap.NetMap from the response.
-func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) {
+func (c *Client) NetMap() (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapMethod)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
netMapMethod, err)
}
@@ -137,11 +136,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error {
default:
return fmt.Errorf("unsupported state %v", node.State)
case netmapcontract.NodeStateOnline:
- dst.SetStatus(netmap.Online)
+ dst.SetOnline()
case netmapcontract.NodeStateOffline:
- dst.SetStatus(netmap.Offline)
+ dst.SetOffline()
case netmapcontract.NodeStateMaintenance:
- dst.SetStatus(netmap.Maintenance)
+ dst.SetMaintenance()
}
return nil
diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go
index e686e271e..a8a306197 100644
--- a/pkg/morph/client/netmap/netmap_test.go
+++ b/pkg/morph/client/netmap/netmap_test.go
@@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
switch i % 3 {
default:
- expected[i].SetStatus(netmap.Offline)
+ expected[i].SetOffline()
case int(netmapcontract.NodeStateOnline):
- expected[i].SetStatus(netmap.Online)
+ expected[i].SetOnline()
case int(netmapcontract.NodeStateMaintenance):
- expected[i].SetStatus(netmap.Maintenance)
+ expected[i].SetMaintenance()
}
expected[i].SetPublicKey(pub)
@@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
var state int64
- switch expected[i].Status() {
- case netmap.Online:
+ switch {
+ case expected[i].IsOnline():
state = int64(netmapcontract.NodeStateOnline)
- case netmap.Offline:
+ case expected[i].IsOffline():
state = int64(netmapcontract.NodeStateOffline)
- case netmap.Maintenance:
+ case expected[i].IsMaintenance():
state = int64(netmapcontract.NodeStateMaintenance)
}
diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go
index 341b20935..ded386c86 100644
--- a/pkg/morph/client/netmap/new_epoch.go
+++ b/pkg/morph/client/netmap/new_epoch.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,14 +8,14 @@ import (
// NewEpoch updates FrostFS epoch number through
// Netmap contract call.
-func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
+func (c *Client) NewEpoch(epoch uint64) error {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
- _, err := c.client.Invoke(ctx, prm)
+ _, err := c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
+ return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
}
return nil
}
@@ -25,16 +24,16 @@ func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
// control notary transaction internally to ensure all
// nodes produce the same transaction with high probability.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) {
+func (c *Client) NewEpochControl(epoch uint64, vub uint32) (uint32, error) {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
prm.SetControlTX(true)
prm.SetVUB(vub)
- res, err := c.client.Invoke(ctx, prm)
+ res, err := c.client.Invoke(prm)
if err != nil {
- return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
+ return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
}
return res.VUB, nil
}
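
A caller sketch for the control-transaction variant; per the comment above, passing vub = 0 lets the client compute the valid-until-block value itself (nmClient and currentEpoch are assumptions of this example):

    vub, err := nmClient.NewEpochControl(currentEpoch+1, 0)
    if err != nil {
        return fmt.Errorf("force new epoch: %w", err)
    }
    log.Printf("new-epoch transaction is valid until block %d", vub)
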
diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go
index e83acde39..764bbc899 100644
--- a/pkg/morph/client/netmap/peer.go
+++ b/pkg/morph/client/netmap/peer.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"errors"
"fmt"
@@ -25,7 +24,7 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
// AddPeer registers peer in FrostFS network through
// Netmap contract call.
-func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
+func (c *Client) AddPeer(p AddPeerPrm) error {
method := addPeerMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -40,15 +39,15 @@ func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
prm.SetArgs(p.nodeInfo.Marshal())
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(ctx, prm); err != nil {
- return fmt.Errorf("invoke method (%s): %w", method, err)
+ if _, err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
return nil
}
// ForceRemovePeer marks the given peer as offline via a notary control transaction.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
+func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
if !c.client.WithNotary() {
return 0, errFailedToRemovePeerWithoutNotary
}
@@ -58,9 +57,9 @@ func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo,
prm.SetControlTX(true)
prm.SetVUB(vub)
- res, err := c.UpdatePeerState(ctx, prm)
+ vub, err := c.UpdatePeerState(prm)
if err != nil {
return 0, fmt.Errorf("updating peer state: %v", err)
}
- return res.VUB, nil
+ return vub, nil
}
diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go
index 9dbec1a90..ba2c26af7 100644
--- a/pkg/morph/client/netmap/snapshot.go
+++ b/pkg/morph/client/netmap/snapshot.go
@@ -1,22 +1,19 @@
package netmap
import (
- "context"
- "fmt"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
// GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response.
-func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(snapshotMethod)
prm.SetArgs(diff)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err)
+ return nil, err
}
return DecodeNetMap(res)
diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go
index f9f639c19..7c3a4e8cd 100644
--- a/pkg/morph/client/netmap/update_state.go
+++ b/pkg/morph/client/netmap/update_state.go
@@ -1,7 +1,7 @@
package netmap
import (
- "context"
+ "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -36,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() {
}
// UpdatePeerState changes peer status through Netmap contract call.
-func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) {
+func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
method := updateStateMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -55,5 +55,9 @@ func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.I
prm.SetArgs(int64(p.state), p.key)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(ctx, prm)
+ res, err := c.client.Invoke(prm)
+ if err != nil {
+ return 0, fmt.Errorf("could not invoke smart contract: %w", err)
+ }
+ return res.VUB, nil
}
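
UpdatePeerState now yields the valid-until-block value directly instead of client.InvokeRes. A hedged caller sketch; SetKey stands for the UpdatePeerPrm key setter that fills p.key, which this hunk does not show:

    package example

    import (
        "fmt"
        "log"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
        "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    )

    // toMaintenance switches a node to maintenance and records the VUB.
    func toMaintenance(nmClient *netmap.Client, nodeKey *keys.PublicKey) error {
        var prm netmap.UpdatePeerPrm
        prm.SetKey(nodeKey.Bytes())
        prm.SetMaintenance()

        vub, err := nmClient.UpdatePeerState(prm)
        if err != nil {
            return fmt.Errorf("update peer state: %w", err)
        }
        log.Printf("state update is valid until block %d", vub)
        return nil
    }
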
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index bc00eb889..218f7ad8e 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -8,12 +8,14 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const (
@@ -35,8 +37,12 @@ const (
NNSPolicyContractName = "policy.frostfs"
)
-// ErrNNSRecordNotFound means that there is no such record in NNS contract.
-var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
+var (
+ // ErrNNSRecordNotFound means that there is no such record in NNS contract.
+ ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
+
+ errEmptyResultStack = errors.New("returned result stack is empty")
+)
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
@@ -55,36 +61,97 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {
return util.Uint160{}, ErrConnectionLost
}
- sh, err = nnsResolve(c.nnsReader, name)
+ nnsHash, err := c.NNSHash()
+ if err != nil {
+ return util.Uint160{}, err
+ }
+
+ sh, err = nnsResolve(c.client, nnsHash, name)
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
return sh, nil
}
-func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
- available, err := r.IsAvailable(domain)
- if err != nil {
- return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
+// NNSHash returns NNS contract hash.
+func (c *Client) NNSHash() (util.Uint160, error) {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return util.Uint160{}, ErrConnectionLost
}
- if available {
+ success := false
+ startedAt := time.Now()
+
+ defer func() {
+ c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt))
+ }()
+
+ nnsHash := c.cache.nns()
+
+ if nnsHash == nil {
+ cs, err := c.client.GetContractStateByID(nnsContractID)
+ if err != nil {
+ return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
+ }
+
+ c.cache.setNNSHash(cs.Hash)
+ nnsHash = &cs.Hash
+ }
+ success = true
+ return *nnsHash, nil
+}
+
+func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
+ found, err := exists(c, nnsHash, domain)
+ if err != nil {
+ return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err)
+ }
+
+ if !found {
return nil, ErrNNSRecordNotFound
}
- return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
+ result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
+ {
+ Type: smartcontract.StringType,
+ Value: domain,
+ },
+ {
+ Type: smartcontract.IntegerType,
+ Value: big.NewInt(int64(nns.TXT)),
+ },
+ }, nil)
+ if err != nil {
+ return nil, err
+ }
+ if result.State != vmstate.Halt.String() {
+ return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
+ }
+ if len(result.Stack) == 0 {
+ return nil, errEmptyResultStack
+ }
+ return result.Stack[0], nil
}
-func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
- arr, err := nnsResolveItem(r, domain)
+func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
+ res, err := nnsResolveItem(c, nnsHash, domain)
if err != nil {
return util.Uint160{}, err
}
- if len(arr) == 0 {
- return util.Uint160{}, errors.New("NNS record is missing")
+ // Parse the result of resolving NNS record.
+ // It works with multiple formats (corresponding to multiple NNS versions).
+ // If array of hashes is provided, it returns only the first one.
+ if arr, ok := res.Value().([]stackitem.Item); ok {
+ if len(arr) == 0 {
+ return util.Uint160{}, errors.New("NNS record is missing")
+ }
+ res = arr[0]
}
- bs, err := arr[0].TryBytes()
+ bs, err := res.TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
@@ -104,6 +171,33 @@ func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error
return util.Uint160{}, errors.New("no valid hashes are found")
}
+func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
+ result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
+ {
+ Type: smartcontract.StringType,
+ Value: domain,
+ },
+ }, nil)
+ if err != nil {
+ return false, err
+ }
+
+ if len(result.Stack) == 0 {
+ return false, errEmptyResultStack
+ }
+
+ res := result.Stack[0]
+
+ available, err := res.TryBool()
+ if err != nil {
+ return false, fmt.Errorf("malformed response: %w", err)
+ }
+
+ // not available means that it is taken
+ // and, therefore, exists
+ return !available, nil
+}
+
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
@@ -147,12 +241,18 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
return gKey, nil
}
- arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
+ nnsHash, err := c.NNSHash()
if err != nil {
return nil, err
}
- if len(arr) == 0 {
+ item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
+ if err != nil {
+ return nil, err
+ }
+
+ arr, ok := item.Value().([]stackitem.Item)
+ if !ok || len(arr) == 0 {
return nil, errors.New("NNS record is missing")
}
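
The public entry point is unchanged by the NNS rework; a resolution sketch using the NNSPolicyContractName constant declared above (morphCli is assumed to be a connected *client.Client):

    policyHash, err := morphCli.NNSContractAddress(client.NNSPolicyContractName)
    if err != nil {
        return fmt.Errorf("resolve policy contract via NNS: %w", err)
    }
    log.Printf("policy contract: %s", policyHash.StringLE())
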
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 448702613..616b3b5c3 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -1,7 +1,6 @@
package client
import (
- "context"
"crypto/elliptic"
"encoding/binary"
"errors"
@@ -38,7 +37,8 @@ type (
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
- proxy util.Uint160
+ notary util.Uint160
+ proxy util.Uint160
}
notaryCfg struct {
@@ -57,11 +57,16 @@ const (
defaultNotaryValidTime = 50
defaultNotaryRoundTime = 100
- setDesignateMethod = "designateAsRole"
+ notaryBalanceOfMethod = "balanceOf"
+ notaryExpirationOfMethod = "expirationOf"
+ setDesignateMethod = "designateAsRole"
+ notaryBalanceErrMsg = "can't fetch notary balance"
notaryNotEnabledPanicMsg = "notary support was not enabled on this client"
)
+var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack")
+
func defaultNotaryConfig(c *Client) *notaryCfg {
return &notaryCfg{
txValidTime: defaultNotaryValidTime,
@@ -101,6 +106,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
alphabetSource: cfg.alphabetSource,
+ notary: notary.Hash,
}
c.notary = notaryCfg
@@ -134,7 +140,7 @@ func (c *Client) ProbeNotary() (res bool) {
// use this function.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
+func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uint256, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -148,18 +154,16 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err)
}
- r := notary.NewReader(c.rpcActor)
- currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash())
+ currentTill, err := c.depositExpirationOf()
if err != nil {
- return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
}
- till := max(int64(bc+delta), int64(currentTill))
- res, _, err := c.depositNotary(ctx, amount, till)
- return res, err
+ till := max(int64(bc+delta), currentTill)
+ return c.depositNotary(amount, till)
}
// DepositEndlessNotary calls notary deposit method. Unlike `DepositNotary`,
@@ -167,12 +171,12 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta
// This allows to avoid ValidAfterDeposit failures.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) {
+func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return util.Uint256{}, 0, ErrConnectionLost
+ return util.Uint256{}, ErrConnectionLost
}
if c.notary == nil {
@@ -180,37 +184,37 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8)
}
// till value refers to a block height and it is uint32 value in neo-go
- return c.depositNotary(ctx, amount, math.MaxUint32)
+ return c.depositNotary(amount, math.MaxUint32)
}
-func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
+func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint256, err error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
- notary.Hash,
+ c.notary.notary,
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {
if !errors.Is(err, neorpc.ErrAlreadyExists) {
- return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't make notary deposit: %w", err)
}
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade,
+ c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Error(err))
- return util.Uint256{}, 0, nil
+ return util.Uint256{}, nil
}
- c.logger.Info(ctx, logs.ClientNotaryDepositInvoke,
+ c.logger.Info(logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
- return txHash, vub, nil
+ return txHash, nil
}
// GetNotaryDeposit returns deposit of client's account in notary contract.
@@ -231,10 +235,18 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) {
sh := c.acc.PrivateKey().PublicKey().GetScriptHash()
- r := notary.NewReader(c.rpcActor)
- bigIntDeposit, err := r.BalanceOf(sh)
+ items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh)
if err != nil {
- return 0, fmt.Errorf("get notary deposit: %w", err)
+ return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)
+ }
+
+ if len(items) != 1 {
+ return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems))
+ }
+
+ bigIntDeposit, err := items[0].TryInteger()
+ if err != nil {
+ return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err))
}
return bigIntDeposit.Int64(), nil
@@ -261,7 +273,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) {
// committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error {
+func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -275,11 +287,10 @@ func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm)
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
- ctx,
setDesignateMethod,
nonce,
vub,
@@ -310,7 +321,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) {
// Requires committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error {
+func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -324,11 +335,10 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet
nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
- ctx,
setDesignateMethod,
nonce,
vub,
@@ -344,19 +354,19 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet
// Returns valid until block value.
//
// `nonce` and `vub` are used only if notary is enabled.
-func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return 0, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(ctx, contract, fee, method, args...)
+ return c.Invoke(contract, fee, method, args...)
}
- return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...)
+ return c.notaryInvoke(false, true, contract, nonce, vub, method, args...)
}
// NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's
@@ -364,19 +374,19 @@ func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fi
// not expected to be signed by the current node.
//
// Considered to be used by non-IR nodes.
-func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return 0, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(ctx, contract, fee, method, args...)
+ return c.Invoke(contract, fee, method, args...)
}
- return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...)
+ return c.notaryInvoke(false, false, contract, rand.Uint32(), vubP, method, args...)
}
// NotarySignAndInvokeTX signs and sends notary request that was received from
@@ -393,7 +403,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return fmt.Errorf("fetch current alphabet keys: %w", err)
+ return fmt.Errorf("could not fetch current alphabet keys: %w", err)
}
cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList)
@@ -418,7 +428,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return err
}
- c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked,
+ c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked,
zap.String("tx_hash", mainH.StringLE()),
zap.Uint32("valid_until_block", untilActual),
zap.String("fallback_hash", fbH.StringLE()))
@@ -426,13 +436,13 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return nil
}
-func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error {
+func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error {
designate := c.GetDesignateHash()
- _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...)
+ _, err := c.notaryInvoke(true, true, designate, nonce, &vub, method, args...)
return err
}
-func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
start := time.Now()
success := false
defer func() {
@@ -441,27 +451,27 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
until, err := c.getUntilValue(vub)
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee)
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
nAct, err := notary.NewActor(c.client, cosigners, c.acc)
if err != nil {
- return InvokeRes{}, err
+ return 0, err
}
mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != vmstate.Halt.String() {
- return &notHaltStateError{state: r.State, exception: r.FaultException}
+ return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
}
t.ValidUntilBlock = until
@@ -471,17 +481,17 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo
}, args...))
if err != nil && !alreadyOnChainError(err) {
- return InvokeRes{}, err
+ return 0, err
}
- c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked,
+ c.logger.Debug(logs.ClientNotaryRequestInvoked,
zap.String("method", method),
zap.Uint32("valid_until_block", untilActual),
zap.String("tx_hash", mainH.StringLE()),
zap.String("fallback_hash", fbH.StringLE()))
success = true
- return InvokeRes{Hash: mainH, VUB: until}, nil
+ return until, nil
}
func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) {
@@ -515,24 +525,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet
if ok {
pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err)
+ return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err)
}
acc = notary.FakeSimpleAccount(pub)
} else {
m, pubsBytes, ok := vm.ParseMultiSigContract(script)
if !ok {
- return nil, errors.New("parse verification script of signer #2: unknown witness type")
+ return nil, errors.New("failed to parse verification script of signer #2: unknown witness type")
}
pubs := make(keys.PublicKeys, len(pubsBytes))
for i := range pubs {
pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err)
+ return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err)
}
}
acc, err = notary.FakeMultisigAccount(m, pubs)
if err != nil {
- return nil, fmt.Errorf("create fake account for signer #2: %w", err)
+ return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err)
}
}
}
@@ -608,7 +618,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey())
err := multisigAccount.ConvertMultisig(m, ir)
if err != nil {
- return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err)
+ // wrap error as FrostFS-specific since the call is not related to any client
+ return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err))
}
} else {
// alphabet multisig redeem script is
@@ -616,7 +627,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
// inner ring multiaddress witness
multisigAccount, err = notary.FakeMultisigAccount(m, ir)
if err != nil {
- return nil, fmt.Errorf("make inner ring multisig wallet: %w", err)
+ // wrap error as FrostFS-specific since the call is not related to any client
+ return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err))
}
}
@@ -626,7 +638,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
func (c *Client) notaryTxValidationLimit() (uint32, error) {
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return 0, fmt.Errorf("get current blockchain height: %w", err)
+ return 0, fmt.Errorf("can't get current blockchain height: %w", err)
}
minTime := bc + c.notary.txValidTime
@@ -635,6 +647,24 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) {
return rounded, nil
}
+func (c *Client) depositExpirationOf() (int64, error) {
+ expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash())
+ if err != nil {
+ return 0, fmt.Errorf("can't invoke method: %w", err)
+ }
+
+ if len(expirationRes) != 1 {
+ return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes))
+ }
+
+ currentTillBig, err := expirationRes[0].TryInteger()
+ if err != nil {
+ return 0, fmt.Errorf("can't parse deposit till value: %w", err)
+ }
+
+ return currentTillBig.Int64(), nil
+}
+
// sigCount returns the number of required signatures.
// For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT).
// If committee is true, returns M as N/2+1.
@@ -708,12 +738,12 @@ func alreadyOnChainError(err error) bool {
func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) {
notaryBalance, err := c.GetNotaryDeposit()
if err != nil {
- return 0, fmt.Errorf("get notary balance: %w", err)
+ return 0, fmt.Errorf("could not get notary balance: %w", err)
}
gasBalance, err := c.GasBalance()
if err != nil {
- return 0, fmt.Errorf("get GAS balance: %w", err)
+ return 0, fmt.Errorf("could not get GAS balance: %w", err)
}
if gasBalance == 0 {
@@ -762,12 +792,12 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool)
if hash != nil {
height, err = c.getTransactionHeight(*hash)
if err != nil {
- return 0, 0, fmt.Errorf("get transaction height: %w", err)
+ return 0, 0, fmt.Errorf("could not get transaction height: %w", err)
}
} else {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- return 0, 0, fmt.Errorf("get chain height: %w", err)
+ return 0, 0, fmt.Errorf("could not get chain height: %w", err)
}
}
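
A deposit sketch under the reworked signature: DepositNotary now returns only the transaction hash, and it still panics unless EnableNotarySupport was called first (morphCli and the chosen amounts are assumptions of this example):

    // Deposit 1 GAS so the notary deposit stays valid for at least
    // 100 blocks beyond the current chain height.
    txHash, err := morphCli.DepositNotary(fixedn.Fixed8FromInt64(1), 100)
    if err != nil {
        return fmt.Errorf("notary deposit: %w", err)
    }
    log.Printf("deposit tx: %s", txHash.StringLE())
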
diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go
index c4eb120d2..dfcf62b83 100644
--- a/pkg/morph/client/static.go
+++ b/pkg/morph/client/static.go
@@ -1,10 +1,8 @@
package client
import (
- "context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -130,8 +128,7 @@ func (i *InvokePrmOptional) SetVUB(v uint32) {
}
type InvokeRes struct {
- Hash util.Uint256
- VUB uint32
+ VUB uint32
}
// Invoke calls Invoke method of Client with static internal script hash and fee.
@@ -143,7 +140,9 @@ type InvokeRes struct {
//
// If fee for the operation executed using specified method is customized, then StaticClient uses it.
// Otherwise, default fee is used.
-func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) {
+func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) {
+ var res InvokeRes
+ var err error
var vubP *uint32
if s.tryNotary {
if s.alpha {
@@ -160,7 +159,7 @@ func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, err
nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash)
}
if err != nil {
- return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err)
+ return InvokeRes{}, fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err)
}
vubP = &vub
@@ -170,23 +169,25 @@ func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, err
vubP = &prm.vub
}
- return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
+ res.VUB, err = s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
+ return res, err
}
if prm.vub > 0 {
vubP = &prm.vub
}
- return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
+ res.VUB, err = s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
+ return res, err
}
- return s.client.Invoke(
- ctx,
+ res.VUB, err = s.client.Invoke(
s.scScriptHash,
s.fee,
prm.method,
prm.args...,
)
+ return res, err
}
// TestInvokePrm groups parameters of the TestInvoke operation.
@@ -206,9 +207,7 @@ func (ti *TestInvokePrm) SetArgs(args ...any) {
}
// TestInvoke calls TestInvoke method of Client with static internal script hash.
-func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) {
- _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
- defer span.End()
+func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) {
return s.client.TestInvoke(
s.scScriptHash,
prm.method,
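
A read-only call sketch against the contextless StaticClient API; sc is an assumed, prepared StaticClient, and the method and key names are illustrative:

    var prm client.TestInvokePrm
    prm.SetMethod("config") // hypothetical contract method
    prm.SetArgs([]byte("someKey"))

    items, err := sc.TestInvoke(prm)
    if err != nil {
        return fmt.Errorf("test invoke: %w", err)
    }
    log.Printf("returned %d stack item(s)", len(items))
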
diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go
index f7b6705a8..cd55d6bd2 100644
--- a/pkg/morph/client/util.go
+++ b/pkg/morph/client/util.go
@@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
case stackitem.IntegerT:
n, err := param.TryInteger()
if err != nil {
- return nil, fmt.Errorf("parse integer bytes: %w", err)
+ return nil, fmt.Errorf("can't parse integer bytes: %w", err)
}
return n.Bytes(), nil
@@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) {
func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
return func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != HaltState {
- return &notHaltStateError{state: r.State, exception: r.FaultException}
+ return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
}
t.SystemFee += add
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
deleted file mode 100644
index 87fcf84b8..000000000
--- a/pkg/morph/client/waiter.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client
-
-import (
- "context"
- "fmt"
-
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
-)
-
-type waiterClient struct {
- c *Client
-}
-
-func (w *waiterClient) Context() context.Context {
- return context.Background()
-}
-
-func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
- return w.c.GetApplicationLog(hash, trig)
-}
-
-func (w *waiterClient) GetBlockCount() (uint32, error) {
- return w.c.BlockCount()
-}
-
-func (w *waiterClient) GetVersion() (*result.Version, error) {
- return w.c.GetVersion()
-}
-
-// WaitTxHalt waits until transaction with the specified hash persists on the blockchain.
-// It also checks execution result to finish in HALT state.
-func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
- w, err := waiter.NewPollingBased(&waiterClient{c: c})
- if err != nil {
- return fmt.Errorf("create tx waiter: %w", err)
- }
-
- res, err := w.WaitAny(ctx, vub, h)
- if err != nil {
- return fmt.Errorf("wait until tx persists: %w", err)
- }
-
- if res.VMState.HasFlag(vmstate.Halt) {
- return nil
- }
- return &notHaltStateError{state: res.VMState.String(), exception: res.FaultException}
-}
diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go
index 99f80584a..062a2a886 100644
--- a/pkg/morph/event/balance/lock.go
+++ b/pkg/morph/event/balance/lock.go
@@ -3,7 +3,7 @@ package balance
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -47,17 +47,61 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash }
// ParseLock from notification into lock structure.
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) {
- var le balance.LockEvent
- if err := le.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
+ var (
+ ev Lock
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Lock{
- id: le.TxID,
- user: le.From,
- lock: le.To,
- amount: le.Amount.Int64(),
- until: le.Until.Int64(),
- txHash: e.Container,
- }, nil
+ if ln := len(params); ln != 5 {
+ return nil, event.WrongNumberOfParameters(5, ln)
+ }
+
+ // parse id
+ ev.id, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock id: %w", err)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock user value: %w", err)
+ }
+
+ ev.user, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err)
+ }
+
+ // parse lock account
+ lock, err := client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock account value: %w", err)
+ }
+
+ ev.lock, err = util.Uint160DecodeBytesBE(lock)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.amount, err = client.IntFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock amount: %w", err)
+ }
+
+ // parse until deadline
+ ev.until, err = client.IntFromStackItem(params[4])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock deadline: %w", err)
+ }
+
+ ev.txHash = e.Container
+
+ return ev, nil
}
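
For reference, the five positional stack items ParseLock now expects, in the order the parser consumes them; a construction sketch mirroring the test file below (user and lockAcc are assumed util.Uint160 values):

    items := []stackitem.Item{
        stackitem.NewByteArray([]byte("tx-id")),   // id
        stackitem.NewByteArray(user.BytesBE()),    // user account
        stackitem.NewByteArray(lockAcc.BytesBE()), // lock account
        stackitem.NewBigInteger(big.NewInt(100)),  // amount
        stackitem.NewBigInteger(big.NewInt(10)),   // until block
    }
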
diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go
index 87b91aede..9199bcd55 100644
--- a/pkg/morph/event/balance/lock_test.go
+++ b/pkg/morph/event/balance/lock_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -27,7 +28,7 @@ func TestParseLock(t *testing.T) {
}
_, err := ParseLock(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error())
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go
index d28f6d521..a206307f8 100644
--- a/pkg/morph/event/container/delete.go
+++ b/pkg/morph/event/container/delete.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -58,14 +58,28 @@ func (DeleteSuccess) MorphEvent() {}
// ParseDeleteSuccess decodes notification event thrown by Container contract into
// DeleteSuccess and returns it as event.Event.
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- var dse container.DeleteSuccessEvent
- if err := dse.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err)
+ items, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
}
- var cnr cid.ID
- cnr.SetSHA256(dse.ContainerID)
- return DeleteSuccess{
- ID: cnr,
- }, nil
+ const expectedItemNumDeleteSuccess = 1
+
+ if ln := len(items); ln != expectedItemNumDeleteSuccess {
+ return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln)
+ }
+
+ binID, err := client.BytesFromStackItem(items[0])
+ if err != nil {
+ return nil, fmt.Errorf("parse container ID item: %w", err)
+ }
+
+ var res DeleteSuccess
+
+ err = res.ID.Decode(binID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ return res, nil
}
diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go
index 62e7d7277..627c5fcf5 100644
--- a/pkg/morph/event/container/delete_test.go
+++ b/pkg/morph/event/container/delete_test.go
@@ -4,6 +4,7 @@ import (
"crypto/sha256"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -17,7 +18,7 @@ func TestParseDeleteSuccess(t *testing.T) {
}
_, err := ParseDeleteSuccess(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
})
t.Run("wrong container parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go
new file mode 100644
index 000000000..4168d8842
--- /dev/null
+++ b/pkg/morph/event/container/eacl.go
@@ -0,0 +1,51 @@
+package container
+
+import (
+ "github.com/nspcc-dev/neo-go/pkg/network/payload"
+)
+
+// SetEACL represents the structure of a notification about
+// a modified eACL table coming from the FrostFS Container contract.
+type SetEACL struct {
+ TableValue []byte
+ SignatureValue []byte
+ PublicKeyValue []byte
+ TokenValue []byte
+
+ // For notary notifications only.
+ // Contains raw transactions of notary request.
+ NotaryRequestValue *payload.P2PNotaryRequest
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (SetEACL) MorphEvent() {}
+
+// Table returns the eACL table in a binary FrostFS API format.
+func (x SetEACL) Table() []byte {
+ return x.TableValue
+}
+
+// Signature returns signature of the binary table.
+func (x SetEACL) Signature() []byte {
+ return x.SignatureValue
+}
+
+// PublicKey returns the public key of the container
+// owner in a binary format.
+func (x SetEACL) PublicKey() []byte {
+ return x.PublicKeyValue
+}
+
+// SessionToken returns binary token of the session
+// within which the eACL was set.
+func (x SetEACL) SessionToken() []byte {
+ return x.TokenValue
+}
+
+// NotaryRequest returns raw notary request if notification
+// was received via notary service. Otherwise, returns nil.
+func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest {
+ return x.NotaryRequestValue
+}
+
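+// expectedItemNumEACL is the number of arguments of the SetEACL
+// notification: eACL table, signature, public key and session token.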
+const expectedItemNumEACL = 4
diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go
new file mode 100644
index 000000000..a4fe7c966
--- /dev/null
+++ b/pkg/morph/event/container/eacl_notary.go
@@ -0,0 +1,75 @@
+package container
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+)
+
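+// Field setters below are applied by ParseSetEACLNotary via setEACLFieldSetters.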
+func (x *SetEACL) setTable(v []byte) {
+ if v != nil {
+ x.TableValue = v
+ }
+}
+
+func (x *SetEACL) setSignature(v []byte) {
+ if v != nil {
+ x.SignatureValue = v
+ }
+}
+
+func (x *SetEACL) setPublicKey(v []byte) {
+ if v != nil {
+ x.PublicKeyValue = v
+ }
+}
+
+func (x *SetEACL) setToken(v []byte) {
+ if v != nil {
+ x.TokenValue = v
+ }
+}
+
+var setEACLFieldSetters = []func(*SetEACL, []byte){
+ // order on stack is reversed
+ (*SetEACL).setToken,
+ (*SetEACL).setPublicKey,
+ (*SetEACL).setSignature,
+ (*SetEACL).setTable,
+}
+
+const (
+	// SetEACLNotaryEvent is the method name for container EACL operations
+	// in `Container` contract. It is used as an identifier for notary
+	// requests that change the eACL.
+ SetEACLNotaryEvent = "setEACL"
+)
+
+// ParseSetEACLNotary from NotaryEvent into container event structure.
+func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) {
+ var (
+ ev SetEACL
+ currentOp opcode.Opcode
+ )
+
+ fieldNum := 0
+
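+	// each argument is expected to be pushed with a PUSHDATA* opcode;
+	// the running field index picks the matching setter since the
+	// arguments arrive on the stack in reverse order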
+ for _, op := range ne.Params() {
+ currentOp = op.Code()
+
+ switch {
+ case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4:
+ if fieldNum == expectedItemNumEACL {
+ return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent)
+ }
+
+ setEACLFieldSetters[fieldNum](&ev, op.Param())
+ fieldNum++
+ default:
+ return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code())
+ }
+ }
+
+ ev.NotaryRequestValue = ne.Raw()
+
+ return ev, nil
+}
diff --git a/pkg/morph/event/container/util_test.go b/pkg/morph/event/container/eacl_test.go
similarity index 100%
rename from pkg/morph/event/container/util_test.go
rename to pkg/morph/event/container/eacl_test.go
diff --git a/pkg/morph/event/container/estimates.go b/pkg/morph/event/container/estimates.go
new file mode 100644
index 000000000..9fd21e2b5
--- /dev/null
+++ b/pkg/morph/event/container/estimates.go
@@ -0,0 +1,78 @@
+package container
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+// StartEstimation structure of container.StartEstimation notification from
+// morph chain.
+type StartEstimation struct {
+ epoch uint64
+}
+
+// StopEstimation structure of container.StopEstimation notification from
+// morph chain.
+type StopEstimation struct {
+ epoch uint64
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (StartEstimation) MorphEvent() {}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (StopEstimation) MorphEvent() {}
+
+// Epoch returns epoch value for which to start container size estimation.
+func (s StartEstimation) Epoch() uint64 { return s.epoch }
+
+// Epoch returns epoch value for which to stop container size estimation.
+func (s StopEstimation) Epoch() uint64 { return s.epoch }
+
+// ParseStartEstimation from notification into container event structure.
+func ParseStartEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ epoch, err := parseEstimation(params)
+ if err != nil {
+ return nil, err
+ }
+
+ return StartEstimation{epoch: epoch}, nil
+}
+
+// ParseStopEstimation from notification into container event structure.
+func ParseStopEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ epoch, err := parseEstimation(params)
+ if err != nil {
+ return nil, err
+ }
+
+ return StopEstimation{epoch: epoch}, nil
+}
+
+func parseEstimation(params []stackitem.Item) (uint64, error) {
+ if ln := len(params); ln != 1 {
+ return 0, event.WrongNumberOfParameters(1, ln)
+ }
+
+	// parse epoch
+ epoch, err := client.IntFromStackItem(params[0])
+ if err != nil {
+ return 0, fmt.Errorf("could not get estimation epoch: %w", err)
+ }
+
+ return uint64(epoch), nil
+}
diff --git a/pkg/morph/event/container/estimates_test.go b/pkg/morph/event/container/estimates_test.go
new file mode 100644
index 000000000..be46e62c4
--- /dev/null
+++ b/pkg/morph/event/container/estimates_test.go
@@ -0,0 +1,80 @@
+package container
+
+import (
+ "math/big"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStartEstimation(t *testing.T) {
+ var epochNum uint64 = 100
+ epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseStartEstimation(createNotifyEventFromItems(prms))
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ })
+
+ t.Run("wrong estimation parameter", func(t *testing.T) {
+ _, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct behavior", func(t *testing.T) {
+ ev, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
+ epochItem,
+ }))
+
+ require.NoError(t, err)
+
+ require.Equal(t, StartEstimation{
+ epochNum,
+ }, ev)
+ })
+}
+
+func TestStopEstimation(t *testing.T) {
+ var epochNum uint64 = 100
+ epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseStopEstimation(createNotifyEventFromItems(prms))
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ })
+
+ t.Run("wrong estimation parameter", func(t *testing.T) {
+ _, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct behavior", func(t *testing.T) {
+ ev, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
+ epochItem,
+ }))
+
+ require.NoError(t, err)
+
+ require.Equal(t, StopEstimation{
+ epochNum,
+ }, ev)
+ })
+}
diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go
index b09394ba4..335034bf3 100644
--- a/pkg/morph/event/container/put.go
+++ b/pkg/morph/event/container/put.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -78,14 +78,33 @@ func (PutSuccess) MorphEvent() {}
// ParsePutSuccess decodes notification event thrown by Container contract into
// PutSuccess and returns it as event.Event.
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- var pse container.PutSuccessEvent
- if err := pse.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err)
+ items, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
}
- var cnr cid.ID
- cnr.SetSHA256(pse.ContainerID)
- return PutSuccess{
- ID: cnr,
- }, nil
+ const expectedItemNumPutSuccess = 2
+
+ if ln := len(items); ln != expectedItemNumPutSuccess {
+ return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln)
+ }
+
+ binID, err := client.BytesFromStackItem(items[0])
+ if err != nil {
+ return nil, fmt.Errorf("parse container ID item: %w", err)
+ }
+
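+	// the public key item is only validated; it is not stored in the resulting event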
+ _, err = client.BytesFromStackItem(items[1])
+ if err != nil {
+ return nil, fmt.Errorf("parse public key item: %w", err)
+ }
+
+ var res PutSuccess
+
+ err = res.ID.Decode(binID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ return res, nil
}
diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go
index 6b2ee7b0a..f5779ced6 100644
--- a/pkg/morph/event/container/put_notary.go
+++ b/pkg/morph/event/container/put_notary.go
@@ -46,7 +46,7 @@ const (
// put container requests.
PutNotaryEvent = "put"
- // PutNamedNotaryEvent is an ID of notary "put named container" notification.
+	// PutNamedNotaryEvent is an ID of notary "put named container" notification.
PutNamedNotaryEvent = "putNamed"
)
diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go
index dd5c7ea93..3622f9943 100644
--- a/pkg/morph/event/container/put_test.go
+++ b/pkg/morph/event/container/put_test.go
@@ -4,8 +4,8 @@ import (
"crypto/sha256"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) {
}
_, err := ParsePutSuccess(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
})
t.Run("wrong container ID parameter", func(t *testing.T) {
@@ -35,30 +35,18 @@ func TestParsePutSuccess(t *testing.T) {
id.Encode(binID)
t.Run("wrong public key parameter", func(t *testing.T) {
- t.Run("wrong type", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewMap(),
- }))
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewMap(),
+ }))
- require.Error(t, err)
- })
- t.Run("garbage data", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewByteArray([]byte("key")),
- }))
- require.Error(t, err)
- })
+ require.Error(t, err)
})
t.Run("correct behavior", func(t *testing.T) {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(binID),
- stackitem.NewByteArray(pk.PublicKey().Bytes()),
+ stackitem.NewByteArray([]byte("key")),
}))
require.NoError(t, err)
diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go
index cf56464b8..eae2a23f5 100644
--- a/pkg/morph/event/frostfs/cheque.go
+++ b/pkg/morph/event/frostfs/cheque.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,20 +34,53 @@ func (c Cheque) LockAccount() util.Uint160 { return c.LockValue }
// ParseCheque from notification into cheque structure.
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ce frostfs.ChequeEvent
- if err := ce.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err)
- }
+ var (
+ ev Cheque
+ err error
+ )
- lock, err := util.Uint160DecodeBytesBE(ce.LockAccount)
+ params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err)
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Cheque{
- IDValue: ce.Id,
- AmountValue: ce.Amount.Int64(),
- UserValue: ce.User,
- LockValue: lock,
- }, nil
+ if ln := len(params); ln != 4 {
+ return nil, event.WrongNumberOfParameters(4, ln)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque id: %w", err)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque user: %w", err)
+ }
+
+ ev.UserValue, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque amount: %w", err)
+ }
+
+ // parse lock account
+ lock, err := client.BytesFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque lock account: %w", err)
+ }
+
+ ev.LockValue, err = util.Uint160DecodeBytesBE(lock)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go
index d92b7922b..ab177757f 100644
--- a/pkg/morph/event/frostfs/cheque_test.go
+++ b/pkg/morph/event/frostfs/cheque_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -26,7 +27,7 @@ func TestParseCheque(t *testing.T) {
}
_, err := ParseCheque(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go
index 805e80f3c..4c87634c2 100644
--- a/pkg/morph/event/frostfs/config.go
+++ b/pkg/morph/event/frostfs/config.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -36,15 +36,39 @@ func (u Config) Key() []byte { return u.KeyValue }
func (u Config) Value() []byte { return u.ValueValue }
func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) {
- var sce frostfs.SetConfigEvent
- if err := sce.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err)
+ var (
+ ev Config
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Config{
- KeyValue: sce.Key,
- ValueValue: sce.Value,
- IDValue: sce.Id,
- TxHashValue: e.Container,
- }, nil
+ if ln := len(params); ln != 3 {
+ return nil, event.WrongNumberOfParameters(3, ln)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config update id: %w", err)
+ }
+
+ // parse key
+ ev.KeyValue, err = client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config key: %w", err)
+ }
+
+ // parse value
+ ev.ValueValue, err = client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config value: %w", err)
+ }
+
+ ev.TxHashValue = e.Container
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go
index 8acc8c15c..dcd4201e4 100644
--- a/pkg/morph/event/frostfs/config_test.go
+++ b/pkg/morph/event/frostfs/config_test.go
@@ -3,6 +3,7 @@ package frostfs
import (
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -20,7 +21,7 @@ func TestParseConfig(t *testing.T) {
}
_, err := ParseConfig(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
})
t.Run("wrong first parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go
index fcb01577e..d8a3b82f0 100644
--- a/pkg/morph/event/frostfs/deposit.go
+++ b/pkg/morph/event/frostfs/deposit.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,15 +34,50 @@ func (d Deposit) Amount() int64 { return d.AmountValue }
// ParseDeposit notification into deposit structure.
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) {
- var de frostfs.DepositEvent
- if err := de.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err)
+ var ev Deposit
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Deposit{
- IDValue: de.TxHash[:],
- AmountValue: de.Amount.Int64(),
- FromValue: de.From,
- ToValue: de.Receiver,
- }, nil
+ if ln := len(params); ln != 4 {
+ return nil, event.WrongNumberOfParameters(4, ln)
+ }
+
+ // parse from
+ from, err := client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit sender: %w", err)
+ }
+
+ ev.FromValue, err = util.Uint160DecodeBytesBE(from)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit amount: %w", err)
+ }
+
+ // parse to
+ to, err := client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit receiver: %w", err)
+ }
+
+ ev.ToValue, err = util.Uint160DecodeBytesBE(to)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit id: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go
index 38d3e61f6..f279a7f9c 100644
--- a/pkg/morph/event/frostfs/deposit_test.go
+++ b/pkg/morph/event/frostfs/deposit_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -11,7 +12,7 @@ import (
func TestParseDeposit(t *testing.T) {
var (
- id = util.Uint256{0, 1, 2, 3}
+ id = []byte("Hello World")
from = util.Uint160{0x1, 0x2, 0x3}
to = util.Uint160{0x3, 0x2, 0x1}
@@ -25,7 +26,7 @@ func TestParseDeposit(t *testing.T) {
}
_, err := ParseDeposit(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
})
t.Run("wrong from parameter", func(t *testing.T) {
@@ -71,12 +72,12 @@ func TestParseDeposit(t *testing.T) {
stackitem.NewByteArray(from.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
stackitem.NewByteArray(to.BytesBE()),
- stackitem.NewByteArray(id[:]),
+ stackitem.NewByteArray(id),
}))
require.NoError(t, err)
require.Equal(t, Deposit{
- IDValue: id[:],
+ IDValue: id,
AmountValue: amount,
FromValue: from,
ToValue: to,
diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go
new file mode 100644
index 000000000..62203540f
--- /dev/null
+++ b/pkg/morph/event/frostfs/ir_update.go
@@ -0,0 +1,54 @@
+package frostfs
+
+import (
+ "crypto/elliptic"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
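+// UpdateInnerRing represents an update of the Inner Ring public key list.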
+type UpdateInnerRing struct {
+ keys []*keys.PublicKey
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (UpdateInnerRing) MorphEvent() {}
+
+func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys }
+
+func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) {
+ var (
+ ev UpdateInnerRing
+ err error
+ )
+
+ if ln := len(params); ln != 1 {
+ return nil, event.WrongNumberOfParameters(1, ln)
+ }
+
+ // parse keys
+ irKeys, err := client.ArrayFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get updated inner ring keys: %w", err)
+ }
+
+ ev.keys = make([]*keys.PublicKey, 0, len(irKeys))
+ for i := range irKeys {
+ rawKey, err := client.BytesFromStackItem(irKeys[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not get updated inner ring public key: %w", err)
+ }
+
+ key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err)
+ }
+
+ ev.keys = append(ev.keys, key)
+ }
+
+ return ev, nil
+}
diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go
new file mode 100644
index 000000000..fae87e5f9
--- /dev/null
+++ b/pkg/morph/event/frostfs/ir_update_test.go
@@ -0,0 +1,57 @@
+package frostfs
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func genKey(t *testing.T) *keys.PrivateKey {
+ priv, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ return priv
+}
+
+func TestParseUpdateInnerRing(t *testing.T) {
+ publicKeys := []*keys.PublicKey{
+ genKey(t).PublicKey(),
+ genKey(t).PublicKey(),
+ genKey(t).PublicKey(),
+ }
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseUpdateInnerRing(prms)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ })
+
+ t.Run("wrong first parameter", func(t *testing.T) {
+ _, err := ParseUpdateInnerRing([]stackitem.Item{
+ stackitem.NewMap(),
+ })
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct", func(t *testing.T) {
+ ev, err := ParseUpdateInnerRing([]stackitem.Item{
+ stackitem.NewArray([]stackitem.Item{
+ stackitem.NewByteArray(publicKeys[0].Bytes()),
+ stackitem.NewByteArray(publicKeys[1].Bytes()),
+ stackitem.NewByteArray(publicKeys[2].Bytes()),
+ }),
+ })
+ require.NoError(t, err)
+
+ require.Equal(t, UpdateInnerRing{
+ keys: publicKeys,
+ }, ev)
+ })
+}
diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go
index 2568b6512..f48067f86 100644
--- a/pkg/morph/event/frostfs/withdraw.go
+++ b/pkg/morph/event/frostfs/withdraw.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -30,14 +30,39 @@ func (w Withdraw) Amount() int64 { return w.AmountValue }
// ParseWithdraw notification into withdraw structure.
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) {
- var we frostfs.WithdrawEvent
- if err := we.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err)
+ var ev Withdraw
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Withdraw{
- IDValue: we.TxHash[:],
- AmountValue: we.Amount.Int64(),
- UserValue: we.User,
- }, nil
+ if ln := len(params); ln != 3 {
+ return nil, event.WrongNumberOfParameters(3, ln)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw user: %w", err)
+ }
+
+ ev.UserValue, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw amount: %w", err)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw id: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go
index e382305e6..33435d19a 100644
--- a/pkg/morph/event/frostfs/withdraw_test.go
+++ b/pkg/morph/event/frostfs/withdraw_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -11,7 +12,7 @@ import (
func TestParseWithdraw(t *testing.T) {
var (
- id = util.Uint256{1, 2, 3}
+ id = []byte("Hello World")
user = util.Uint160{0x1, 0x2, 0x3}
amount int64 = 10
@@ -24,7 +25,7 @@ func TestParseWithdraw(t *testing.T) {
}
_, err := ParseWithdraw(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
})
t.Run("wrong user parameter", func(t *testing.T) {
@@ -58,12 +59,12 @@ func TestParseWithdraw(t *testing.T) {
ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(user.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
- stackitem.NewByteArray(id[:]),
+ stackitem.NewByteArray(id),
}))
require.NoError(t, err)
require.Equal(t, Withdraw{
- IDValue: id[:],
+ IDValue: id,
AmountValue: amount,
UserValue: user,
}, ev)
diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go
index 55a514ff1..182b4667e 100644
--- a/pkg/morph/event/handlers.go
+++ b/pkg/morph/event/handlers.go
@@ -1,26 +1,32 @@
package event
import (
- "context"
-
"github.com/nspcc-dev/neo-go/pkg/core/block"
- "github.com/nspcc-dev/neo-go/pkg/util"
)
// Handler is an Event processing function.
-type Handler func(context.Context, Event)
+type Handler func(Event)
// BlockHandler is a chain block processing function.
-type BlockHandler func(context.Context, *block.Block)
+type BlockHandler func(*block.Block)
// NotificationHandlerInfo is a structure that groups
// the parameters of the handler of particular
// contract event.
type NotificationHandlerInfo struct {
- Contract util.Uint160
- Type Type
- Parser NotificationParser
- Handlers []Handler
+ scriptHashWithType
+
+ h Handler
+}
+
+// SetHandler is an event handler setter.
+func (s *NotificationHandlerInfo) SetHandler(v Handler) {
+ s.h = v
+}
+
+// Handler returns an event handler.
+func (s NotificationHandlerInfo) Handler() Handler {
+ return s.h
}
// NotaryHandlerInfo is a structure that groups
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index e5cdfeef7..dd3c7d216 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -33,6 +33,13 @@ type Listener interface {
// it could not be started.
ListenWithError(context.Context, chan<- error)
+	// SetNotificationParser must set the parser of a particular contract event.
+	//
+	// The parser of each event must be set once. All parsers must be set before the Listen call.
+	//
+	// Must ignore nil parsers and all calls after the listener has been started.
+ SetNotificationParser(NotificationParserInfo)
+
// RegisterNotificationHandler must register the event handler for particular notification event of contract.
//
// The specified handler must be called after each capture and parsing of the event.
@@ -93,6 +100,8 @@ type listener struct {
startOnce, stopOnce sync.Once
+ started bool
+
notificationParsers map[scriptHashWithType]NotificationParser
notificationHandlers map[scriptHashWithType][]Handler
@@ -111,7 +120,7 @@ type listener struct {
pool *ants.Pool
}
-const newListenerFailMsg = "instantiate Listener"
+const newListenerFailMsg = "could not instantiate Listener"
var (
errNilLogger = errors.New("nil logger")
@@ -134,8 +143,11 @@ func (l *listener) Listen(ctx context.Context) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
-
- l.listen(ctx, nil)
+ if err := l.listen(ctx, nil); err != nil {
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
+ zap.String("error", err.Error()),
+ )
+ }
})
}
@@ -149,17 +161,26 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
l.startOnce.Do(func() {
l.wg.Add(1)
defer l.wg.Done()
-
- l.listen(ctx, intError)
+ if err := l.listen(ctx, intError); err != nil {
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
+ zap.String("error", err.Error()),
+ )
+ l.sendError(ctx, intError, err)
+ }
})
}
-func (l *listener) listen(ctx context.Context, intError chan<- error) {
+func (l *listener) listen(ctx context.Context, intError chan<- error) error {
+ // mark listener as started
+ l.started = true
+
subErrCh := make(chan error)
go l.subscribe(subErrCh)
l.listenLoop(ctx, intError, subErrCh)
+
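+	// subscription failures are reported via subErrCh and handled in listenLoop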
+ return nil
}
func (l *listener) subscribe(errCh chan error) {
@@ -171,7 +192,7 @@ func (l *listener) subscribe(errCh chan error) {
// fill the list with the contracts with set event parsers.
l.mtx.RLock()
for hashType := range l.notificationParsers {
- scHash := hashType.Hash
+ scHash := hashType.ScriptHash()
// prevent repetitions
for _, hash := range hashes {
@@ -180,26 +201,26 @@ func (l *listener) subscribe(errCh chan error) {
}
}
- hashes = append(hashes, hashType.Hash)
+ hashes = append(hashes, hashType.ScriptHash())
}
l.mtx.RUnlock()
err := l.subscriber.SubscribeForNotification(hashes...)
if err != nil {
- errCh <- fmt.Errorf("subscribe for notifications: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for notifications: %w", err)
return
}
if len(l.blockHandlers) > 0 {
if err = l.subscriber.BlockNotifications(); err != nil {
- errCh <- fmt.Errorf("subscribe for blocks: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for blocks: %w", err)
return
}
}
if l.listenNotary {
if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
- errCh <- fmt.Errorf("subscribe for notary requests: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err)
return
}
}
@@ -213,7 +234,7 @@ func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error
// in the same routine when shutting down node.
select {
case <-ctx.Done():
- l.log.Info(ctx, logs.EventStopEventListenerByContext,
+ l.log.Info(logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
return false
@@ -230,81 +251,81 @@ loop:
select {
case err := <-subErrCh:
if !l.sendError(ctx, intErr, err) {
- l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
+ l.log.Error(logs.EventStopEventListenerByError, zap.Error(err))
}
break loop
case <-ctx.Done():
- l.log.Info(ctx, logs.EventStopEventListenerByContext,
+ l.log.Info(logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
break loop
case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
+ l.log.Warn(logs.EventStopEventListenerByNotificationChannel)
l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
break loop
} else if notifyEvent == nil {
- l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
+ l.log.Warn(logs.EventNilNotificationEventWasCaught)
continue loop
}
- l.handleNotifyEvent(ctx, notifyEvent)
+ l.handleNotifyEvent(notifyEvent)
case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
+ l.log.Warn(logs.EventStopEventListenerByNotaryChannel)
l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
break loop
} else if notaryEvent == nil {
- l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
+ l.log.Warn(logs.EventNilNotaryEventWasCaught)
continue loop
}
- l.handleNotaryEvent(ctx, notaryEvent)
+ l.handleNotaryEvent(notaryEvent)
case b, ok := <-chs.BlockCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
+ l.log.Warn(logs.EventStopEventListenerByBlockChannel)
l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
break loop
} else if b == nil {
- l.log.Warn(ctx, logs.EventNilBlockWasCaught)
+ l.log.Warn(logs.EventNilBlockWasCaught)
continue loop
}
- l.handleBlockEvent(ctx, b)
+ l.handleBlockEvent(b)
}
}
}
-func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) {
+func (l *listener) handleBlockEvent(b *block.Block) {
if err := l.pool.Submit(func() {
for i := range l.blockHandlers {
- l.blockHandlers[i](ctx, b)
+ l.blockHandlers[i](b)
}
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) {
+func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotary(ctx, notaryEvent)
+ l.parseAndHandleNotary(notaryEvent)
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotification(ctx, notifyEvent)
+ l.parseAndHandleNotification(notifyEvent)
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) {
log := l.log.With(
zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
)
@@ -317,14 +338,16 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
)
// get the event parser
- keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent}
+ keyEvent := scriptHashWithType{}
+ keyEvent.SetScriptHash(notifyEvent.ScriptHash)
+ keyEvent.SetType(typEvent)
l.mtx.RLock()
parser, ok := l.notificationParsers[keyEvent]
l.mtx.RUnlock()
if !ok {
- log.Debug(ctx, logs.EventEventParserNotSet)
+ log.Debug(logs.EventEventParserNotSet)
return
}
@@ -332,8 +355,8 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
// parse the notification event
event, err := parser(notifyEvent)
if err != nil {
- log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotParseNotificationEvent,
+ zap.String("error", err.Error()),
)
return
@@ -345,7 +368,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
l.mtx.RUnlock()
if len(handlers) == 0 {
- log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -353,11 +376,11 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
}
for _, handler := range handlers {
- handler(ctx, event)
+ handler(event)
}
}
-func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) {
+func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
// prepare the notary event
notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest)
if err != nil {
@@ -365,14 +388,14 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
switch {
case errors.Is(err, ErrTXAlreadyHandled):
case errors.As(err, &expErr):
- l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
- zap.Error(err),
+ l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent,
+ zap.String("error", err.Error()),
zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
)
default:
- l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
- zap.Error(err),
+ l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent,
+ zap.String("error", err.Error()),
)
}
@@ -395,7 +418,7 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
l.mtx.RUnlock()
if !ok {
- log.Debug(ctx, logs.EventNotaryParserNotSet)
+ log.Debug(logs.EventNotaryParserNotSet)
return
}
@@ -403,8 +426,8 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
// parse the notary event
event, err := parser(notaryEvent)
if err != nil {
- log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotParseNotaryEvent,
+ zap.String("error", err.Error()),
)
return
@@ -416,14 +439,47 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
l.mtx.RUnlock()
if !ok {
- log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
return
}
- handler(ctx, event)
+ handler(event)
+}
+
+// SetNotificationParser sets the parser of a particular contract event.
+//
+// Ignores nil and already set parsers.
+// Ignores the parser if the listener has been started.
+func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
+ log := l.log.With(
+ zap.String("contract", pi.ScriptHash().StringLE()),
+ zap.Stringer("event_type", pi.getType()),
+ )
+
+ parser := pi.parser()
+ if parser == nil {
+ log.Info(logs.EventIgnoreNilEventParser)
+ return
+ }
+
+ l.mtx.Lock()
+ defer l.mtx.Unlock()
+
+ // check if the listener was started
+ if l.started {
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
+ return
+ }
+
+ // add event parser
+ if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok {
+ l.notificationParsers[pi.scriptHashWithType] = pi.parser()
+ }
+
+ log.Debug(logs.EventRegisteredNewEventParser)
}
// RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -432,23 +488,35 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
// Ignores handlers of event without parser.
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
log := l.log.With(
- zap.String("contract", hi.Contract.StringLE()),
- zap.Stringer("event_type", hi.Type),
+ zap.String("contract", hi.ScriptHash().StringLE()),
+ zap.Stringer("event_type", hi.GetType()),
)
+ handler := hi.Handler()
+ if handler == nil {
+ log.Warn(logs.EventIgnoreNilEventHandler)
+ return
+ }
+
// check if parser was set
+ l.mtx.RLock()
+ _, ok := l.notificationParsers[hi.scriptHashWithType]
+ l.mtx.RUnlock()
+
+ if !ok {
+ log.Warn(logs.EventIgnoreHandlerOfEventWoParser)
+ return
+ }
+
+ // add event handler
l.mtx.Lock()
- defer l.mtx.Unlock()
-
- k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type}
-
- l.notificationParsers[k] = hi.Parser
- l.notificationHandlers[k] = append(
- l.notificationHandlers[k],
- hi.Handlers...,
+ l.notificationHandlers[hi.scriptHashWithType] = append(
+ l.notificationHandlers[hi.scriptHashWithType],
+ hi.Handler(),
)
+ l.mtx.Unlock()
- log.Debug(context.Background(), logs.EventRegisteredNewEventHandler)
+ log.Debug(logs.EventRegisteredNewEventHandler)
}
// EnableNotarySupport enables notary request listening. Passed hash is
@@ -487,15 +555,27 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
zap.Stringer("notary_type", pi.RequestType()),
)
+ parser := pi.parser()
+ if parser == nil {
+ log.Info(logs.EventIgnoreNilNotaryEventParser)
+ return
+ }
+
l.mtx.Lock()
defer l.mtx.Unlock()
+ // check if the listener was started
+ if l.started {
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
+ return
+ }
+
// add event parser
if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok {
l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
}
- log.Info(context.Background(), logs.EventRegisteredNewEventParser)
+ log.Info(logs.EventRegisteredNewEventParser)
}
// RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -513,13 +593,19 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
zap.Stringer("notary type", hi.RequestType()),
)
+ handler := hi.Handler()
+ if handler == nil {
+ log.Warn(logs.EventIgnoreNilNotaryEventHandler)
+ return
+ }
+
// check if parser was set
l.mtx.RLock()
_, ok := l.notaryParsers[hi.notaryRequestTypes]
l.mtx.RUnlock()
if !ok {
- log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser)
+ log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser)
return
}
@@ -528,7 +614,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
l.mtx.Unlock()
- log.Info(context.Background(), logs.EventRegisteredNewEventHandler)
+ log.Info(logs.EventRegisteredNewEventHandler)
}
// Stop closes subscription channel with remote neo node.
@@ -541,6 +627,11 @@ func (l *listener) Stop() {
}
func (l *listener) RegisterBlockHandler(handler BlockHandler) {
+ if handler == nil {
+ l.log.Warn(logs.EventIgnoreNilBlockHandler)
+ return
+ }
+
l.blockHandlers = append(l.blockHandlers, handler)
}
@@ -557,7 +648,7 @@ func NewListener(p ListenerParams) (Listener, error) {
// The default capacity is 0, which means "infinite".
pool, err := ants.NewPool(p.WorkerPoolCapacity)
if err != nil {
- return nil, fmt.Errorf("init worker pool: %w", err)
+ return nil, fmt.Errorf("could not init worker pool: %w", err)
}
return &listener{
diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go
index 87f37305f..5f7cf9f43 100644
--- a/pkg/morph/event/listener_test.go
+++ b/pkg/morph/event/listener_test.go
@@ -34,24 +34,34 @@ func TestEventHandling(t *testing.T) {
blockHandled := make(chan bool)
handledBlocks := make([]*block.Block, 0)
- l.RegisterBlockHandler(func(_ context.Context, b *block.Block) {
+ l.RegisterBlockHandler(func(b *block.Block) {
handledBlocks = append(handledBlocks, b)
blockHandled <- true
})
+ key := scriptHashWithType{
+ scriptHashValue: scriptHashValue{
+ hash: util.Uint160{100},
+ },
+ typeValue: typeValue{
+ typ: TypeFromString("notification type"),
+ },
+ }
+
+ l.SetNotificationParser(NotificationParserInfo{
+ scriptHashWithType: key,
+ p: func(cne *state.ContainedNotificationEvent) (Event, error) {
+ return testNotificationEvent{source: cne}, nil
+ },
+ })
+
notificationHandled := make(chan bool)
handledNotifications := make([]Event, 0)
l.RegisterNotificationHandler(NotificationHandlerInfo{
- Contract: util.Uint160{100},
- Type: TypeFromString("notification type"),
- Parser: func(cne *state.ContainedNotificationEvent) (Event, error) {
- return testNotificationEvent{source: cne}, nil
- },
- Handlers: []Handler{
- func(_ context.Context, e Event) {
- handledNotifications = append(handledNotifications, e)
- notificationHandled <- true
- },
+ scriptHashWithType: key,
+ h: func(e Event) {
+ handledNotifications = append(handledNotifications, e)
+ notificationHandled <- true
},
})
@@ -127,7 +137,7 @@ func TestErrorPassing(t *testing.T) {
WorkerPoolCapacity: 10,
})
require.NoError(t, err, "failed to create listener")
- l.RegisterBlockHandler(func(context.Context, *block.Block) {})
+ l.RegisterBlockHandler(func(b *block.Block) {})
errCh := make(chan error)
diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go
index 39c8f6237..e454e2a6a 100644
--- a/pkg/morph/event/netmap/epoch.go
+++ b/pkg/morph/event/netmap/epoch.go
@@ -1,7 +1,9 @@
package netmap
import (
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -35,13 +37,22 @@ func (s NewEpoch) TxHash() util.Uint256 {
//
// Result is type of NewEpoch.
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
- var nee netmap.NewEpochEvent
- if err := nee.FromStackItem(e.Item); err != nil {
- return nil, err
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ if ln := len(params); ln != 1 {
+ return nil, event.WrongNumberOfParameters(1, ln)
+ }
+
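+	// parse epoch number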
+ prmEpochNum, err := client.IntFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get integer epoch number: %w", err)
}
return NewEpoch{
- Num: nee.Epoch.Uint64(),
+ Num: uint64(prmEpochNum),
Hash: e.Container,
}, nil
}
diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go
index 6ff692327..bc267ecb6 100644
--- a/pkg/morph/event/netmap/epoch_test.go
+++ b/pkg/morph/event/netmap/epoch_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -16,7 +17,7 @@ func TestParseNewEpoch(t *testing.T) {
}
_, err := ParseNewEpoch(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
})
t.Run("wrong first parameter type", func(t *testing.T) {
diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go
index 993182ab4..0260810b8 100644
--- a/pkg/morph/event/netmap/update_peer_notary.go
+++ b/pkg/morph/event/netmap/update_peer_notary.go
@@ -10,7 +10,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)
-var errNilPubKey = errors.New("public key is nil")
+var errNilPubKey = errors.New("could not parse public key: public key is nil")
func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
if v == nil {
@@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256())
if err != nil {
- return fmt.Errorf("parse public key: %w", err)
+ return fmt.Errorf("could not parse public key: %w", err)
}
return
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index b11973646..37091f768 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
for {
opCode, param, err = ctx.Next()
if err != nil {
- return nil, fmt.Errorf("get next opcode in script: %w", err)
+ return nil, fmt.Errorf("could not get next opcode in script: %w", err)
}
if opCode == opcode.RET {
@@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
// retrieve contract's script hash
contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param)
if err != nil {
- return nil, fmt.Errorf("decode contract hash: %w", err)
+ return nil, fmt.Errorf("could not decode contract hash: %w", err)
}
// retrieve contract's method
@@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
if len(args) != 0 {
err = p.validateParameterOpcodes(args)
if err != nil {
- return nil, fmt.Errorf("validate arguments: %w", err)
+ return nil, fmt.Errorf("could not validate arguments: %w", err)
}
// without args packing opcodes
@@ -199,14 +199,14 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
// neo-go API)
//
// this check prevents notary flow recursion
- if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 &&
- !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version
+ if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 ||
+ bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version
return ErrTXAlreadyHandled
}
currentAlphabet, err := p.alphaKeys()
if err != nil {
- return fmt.Errorf("fetch Alphabet public keys: %w", err)
+ return fmt.Errorf("could not fetch Alphabet public keys: %w", err)
}
err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
@@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error {
argsLen, err := IntFromOpcode(ops[l-2])
if err != nil {
- return fmt.Errorf("parse argument len: %w", err)
+ return fmt.Errorf("could not parse argument len: %w", err)
}
err = validateNestedArgs(argsLen, ops[:l-2])
@@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error {
argsLen, err := IntFromOpcode(ops[i-1])
if err != nil {
- return fmt.Errorf("parse argument len: %w", err)
+ return fmt.Errorf("could not parse argument len: %w", err)
}
expArgLen += argsLen + 1
@@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error {
currBlock, err := p.blockCounter.BlockCount()
if err != nil {
- return fmt.Errorf("fetch current chain height: %w", err)
+ return fmt.Errorf("could not fetch current chain height: %w", err)
}
if currBlock >= nvb.Height {
@@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("get Alphabet verification script: %w", err)
+ return fmt.Errorf("could not get Alphabet verification script: %w", err)
}
if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) {
@@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("get Alphabet verification script: %w", err)
+ return fmt.Errorf("could not get Alphabet verification script: %w", err)
}
// the second one must be witness of the current
@@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
// the last one must be a placeholder for notary contract witness
last := len(w) - 1
- if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981
- !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
+ if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981
+ bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
len(w[last].VerificationScript) != 0 {
return errIncorrectNotaryPlaceholder
}
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index 60ddb4601..4c269bcbd 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -25,7 +25,7 @@ var (
alphaKeys keys.PublicKeys
wrongAlphaKeys keys.PublicKeys
- dummyAlphabetInvocationScript []byte
+ dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in
dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually
wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...)
diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go
index 5adeb4b30..90eff0bd2 100644
--- a/pkg/morph/event/parsers.go
+++ b/pkg/morph/event/parsers.go
@@ -11,6 +11,15 @@ import (
// from the StackItem list.
type NotificationParser func(*state.ContainedNotificationEvent) (Event, error)
+// NotificationParserInfo is a structure that groups
+// the parameters of a particular contract
+// notification event parser.
+type NotificationParserInfo struct {
+ scriptHashWithType
+
+ p NotificationParser
+}
+
// NotaryPreparator constructs NotaryEvent
// from the NotaryRequest event.
type NotaryPreparator interface {
@@ -38,6 +47,24 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) {
n.p = p
}
+// SetParser is an event parser setter.
+func (s *NotificationParserInfo) SetParser(v NotificationParser) {
+ s.p = v
+}
+
+func (s NotificationParserInfo) parser() NotificationParser {
+ return s.p
+}
+
+// SetType is an event type setter.
+func (s *NotificationParserInfo) SetType(v Type) {
+ s.typ = v
+}
+
+func (s NotificationParserInfo) getType() Type {
+ return s.typ
+}
+
type wrongPrmNumber struct {
exp, act int
}
diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go
index b384e436b..28c968046 100644
--- a/pkg/morph/event/rolemanagement/designate.go
+++ b/pkg/morph/event/rolemanagement/designate.go
@@ -26,7 +26,7 @@ func (Designate) MorphEvent() {}
func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) {
params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
if len(params) != 2 {
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index 0088be400..f3b6443fb 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -1,7 +1,6 @@
package event
import (
- "context"
"errors"
"fmt"
@@ -20,9 +19,13 @@ type scriptHashValue struct {
hash util.Uint160
}
+type typeValue struct {
+ typ Type
+}
+
type scriptHashWithType struct {
- Hash util.Uint160
- Type Type
+ scriptHashValue
+ typeValue
}
type notaryRequestTypes struct {
@@ -69,15 +72,25 @@ func (s scriptHashValue) ScriptHash() util.Uint160 {
return s.hash
}
+// SetType is an event type setter.
+func (s *typeValue) SetType(v Type) {
+ s.typ = v
+}
+
+// GetType is an event type getter.
+func (s typeValue) GetType() Type {
+ return s.typ
+}
+
// WorkerPoolHandler sets closure over worker pool w with passed handler h.
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler {
- return func(ctx context.Context, e Event) {
+ return func(e Event) {
err := w.Submit(func() {
- h(ctx, e)
+ h(e)
})
if err != nil {
- log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool,
+ zap.String("error", err.Error()),
)
}
}
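A short sketch of wrapping a handler with the restored WorkerPoolHandler; the pool is illustrative and only assumed to satisfy the package's WorkerPool interface (Submit(func()) error):

wrapped := event.WorkerPoolHandler(pool, func(e event.Event) {
	// process the event; runs on a pool goroutine
}, log)
wrapped(ev) // if pool.Submit fails, the error is logged, not returned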
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index 4ef59ed6a..ee5466a7d 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -245,16 +245,16 @@ routeloop:
}
func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool {
- s.log.Info(ctx, logs.RPConnectionLost)
+ s.log.Info(logs.RPConnectionLost)
if !s.client.SwitchRPC(ctx) {
- s.log.Error(ctx, logs.RPCNodeSwitchFailure)
+ s.log.Error(logs.RPCNodeSwitchFailure)
return false
}
s.Lock()
chs := newSubChannels()
go func() {
- finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
+ finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
}()
s.current = chs
s.Unlock()
@@ -295,7 +295,7 @@ drainloop:
// restoreSubscriptions restores subscriptions according to
// cached information about them.
-func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent,
+func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent,
blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent,
) bool {
var err error
@@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *s
if s.subscribedToNewBlocks {
_, err = s.client.ReceiveBlocks(blCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *s
for contract := range s.subscribedEvents {
_, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
@@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *s
for signer := range s.subscribedNotaryEvents {
_, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go
index 974be1120..be20d3571 100644
--- a/pkg/morph/timer/block.go
+++ b/pkg/morph/timer/block.go
@@ -15,19 +15,41 @@ type BlockTickHandler func()
// It can tick the blocks and perform certain actions
// on block time intervals.
type BlockTimer struct {
+ rolledBack bool
+
mtx sync.Mutex
dur BlockMeter
baseDur uint32
+ mul, div uint32
+
cur, tgt uint32
last uint32
h BlockTickHandler
+ ps []BlockTimer
+
once bool
+
+ deltaCfg
+}
+
+// DeltaOption is an option for a delta-interval handler.
+type DeltaOption func(*deltaCfg)
+
+type deltaCfg struct {
+ pulse bool
+}
+
+// WithPulse returns an option to call the delta-interval handler multiple times per base interval.
+func WithPulse() DeltaOption {
+ return func(c *deltaCfg) {
+ c.pulse = true
+ }
}
// StaticBlockMeter returns BlockMeters that always returns (d, nil).
@@ -43,19 +65,52 @@ func StaticBlockMeter(d uint32) BlockMeter {
func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
+ mul: 1,
+ div: 1,
h: h,
+ deltaCfg: deltaCfg{
+ pulse: true,
+ },
}
}
// NewOneTickTimer creates a new BlockTimer that ticks only once.
+//
+// Do not use delta handlers with pulse in this timer.
func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
+ mul: 1,
+ div: 1,
h: h,
once: true,
}
}
+// OnDelta registers a handler which is executed on the (mul / div * BlockMeter())-th block
+// after the basic interval reset.
+//
+// If the WithPulse option is provided, the handler is executed every (mul / div * BlockMeter())
+// blocks during the base interval.
+func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) {
+ c := deltaCfg{
+ pulse: false,
+ }
+
+ for i := range opts {
+ opts[i](&c)
+ }
+
+ t.ps = append(t.ps, BlockTimer{
+ mul: mul,
+ div: div,
+ h: h,
+ once: t.once,
+
+ deltaCfg: c,
+ })
+}
+
// Reset resets previous ticks of the BlockTimer.
//
// Returns BlockMeter's error upon occurrence.
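A usage sketch for the delta handlers (names illustrative): with a base interval of 10 blocks, OnDelta(1, 2, h, WithPulse()) yields delta = 1*10/2 = 5, so h fires every 5 ticks; without WithPulse it fires once per base interval.

bt := timer.NewBlockTimer(timer.StaticBlockMeter(10), func() {
	// base handler: fires every 10 ticks
})
bt.OnDelta(1, 2, func() {
	// delta handler with pulse: fires every 5 ticks
}, timer.WithPulse())
if err := bt.Reset(); err != nil { // arms the timer and its delta handlers
	panic(err)
}
for h := uint32(1); h <= 20; h++ {
	bt.Tick(h)
}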
@@ -69,18 +124,29 @@ func (t *BlockTimer) Reset() error {
t.resetWithBaseInterval(d)
+ for i := range t.ps {
+ t.ps[i].resetWithBaseInterval(d)
+ }
+
t.mtx.Unlock()
return nil
}
func (t *BlockTimer) resetWithBaseInterval(d uint32) {
+ t.rolledBack = false
t.baseDur = d
t.reset()
}
func (t *BlockTimer) reset() {
- delta := t.baseDur
+ mul, div := t.mul, t.div
+
+ if !t.pulse && t.rolledBack && mul < div {
+ mul, div = 1, 1
+ }
+
+ delta := mul * t.baseDur / div
if delta == 0 {
delta = 1
}
@@ -114,7 +180,12 @@ func (t *BlockTimer) tick(h uint32) {
if !t.once {
t.cur = 0
+ t.rolledBack = true
t.reset()
}
}
+
+ for i := range t.ps {
+ t.ps[i].tick(h)
+ }
}
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index a144b3db6..7929754c1 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -1,7 +1,6 @@
package timer_test
import (
- "errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
@@ -9,7 +8,7 @@ import (
)
func tickN(t *timer.BlockTimer, n uint32) {
- for range n {
+ for i := uint32(0); i < n; i++ {
t.Tick(0)
}
}
@@ -18,7 +17,7 @@ func tickN(t *timer.BlockTimer, n uint32) {
// "resetting" consists of ticking the current height as well and invoking `Reset`.
func TestIRBlockTimer_Reset(t *testing.T) {
var baseCounter [2]int
- const blockDur = uint32(3)
+ blockDur := uint32(3)
bt1 := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
@@ -49,40 +48,8 @@ func TestIRBlockTimer_Reset(t *testing.T) {
require.Equal(t, baseCounter[0], baseCounter[1])
}
-func TestBlockTimer_ResetChangeDuration(t *testing.T) {
- var dur uint32 = 2
- var err error
- var counter int
-
- bt := timer.NewBlockTimer(
- func() (uint32, error) { return dur, err },
- func() { counter++ })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 2)
- require.Equal(t, 1, counter)
-
- t.Run("return error", func(t *testing.T) {
- dur = 5
- err = errors.New("my awesome error")
- require.ErrorIs(t, bt.Reset(), err)
-
- tickN(bt, 2)
- require.Equal(t, 2, counter)
- })
- t.Run("change duration", func(t *testing.T) {
- dur = 5
- err = nil
- require.NoError(t, bt.Reset())
-
- tickN(bt, 5)
- require.Equal(t, 3, counter)
- })
-}
-
func TestBlockTimer(t *testing.T) {
- const blockDur = uint32(10)
+ blockDur := uint32(10)
baseCallCounter := uint32(0)
bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
@@ -96,6 +63,85 @@ func TestBlockTimer(t *testing.T) {
tickN(bt, intervalNum*blockDur)
require.Equal(t, intervalNum, uint32(baseCallCounter))
+
+ // add half-interval handler
+ halfCallCounter := uint32(0)
+
+ bt.OnDelta(1, 2, func() {
+ halfCallCounter++
+ })
+
+ // add double interval handler
+ doubleCallCounter := uint32(0)
+
+ bt.OnDelta(2, 1, func() {
+ doubleCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ baseCallCounter = 0
+ intervalNum = 20
+
+ tickN(bt, intervalNum*blockDur)
+
+ require.Equal(t, intervalNum, uint32(halfCallCounter))
+ require.Equal(t, intervalNum, uint32(baseCallCounter))
+ require.Equal(t, intervalNum/2, uint32(doubleCallCounter))
+}
+
+func TestDeltaPulse(t *testing.T) {
+ blockDur := uint32(9)
+ baseCallCounter := uint32(0)
+
+ bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ deltaCallCounter := uint32(0)
+
+ div := uint32(3)
+
+ bt.OnDelta(1, div, func() {
+ deltaCallCounter++
+ }, timer.WithPulse())
+
+ require.NoError(t, bt.Reset())
+
+ intervalNum := uint32(7)
+
+ tickN(bt, intervalNum*blockDur)
+
+ require.Equal(t, intervalNum, uint32(baseCallCounter))
+ require.Equal(t, intervalNum*div, uint32(deltaCallCounter))
+}
+
+func TestDeltaReset(t *testing.T) {
+ blockDur := uint32(6)
+ baseCallCounter := 0
+
+ bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ deltaCallCounter := 0
+
+ bt.OnDelta(1, 3, func() {
+ deltaCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 6)
+
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, deltaCallCounter)
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 3)
+
+ require.Equal(t, 2, deltaCallCounter)
}
func TestNewOneTickTimer(t *testing.T) {
@@ -122,51 +168,82 @@ func TestNewOneTickTimer(t *testing.T) {
tickN(bt, 10)
require.Equal(t, 1, baseCallCounter)
})
+
+ t.Run("delta without pulse", func(t *testing.T) {
+ blockDur = uint32(10)
+ baseCallCounter = 0
+
+ bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ deltaCallCounter := 0
+
+ bt.OnDelta(1, 10, func() {
+ deltaCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 10)
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, deltaCallCounter)
+
+ tickN(bt, 10) // 10 more ticks must not affect counters
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, deltaCallCounter)
+ })
}
func TestBlockTimer_TickSameHeight(t *testing.T) {
- var baseCounter int
+ var baseCounter, deltaCounter int
blockDur := uint32(2)
bt := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
func() { baseCounter++ })
+ bt.OnDelta(2, 1, func() {
+ deltaCounter++
+ })
require.NoError(t, bt.Reset())
- check := func(t *testing.T, h uint32, base int) {
+ check := func(t *testing.T, h uint32, base, delta int) {
for range 2 * int(blockDur) {
bt.Tick(h)
require.Equal(t, base, baseCounter)
+ require.Equal(t, delta, deltaCounter)
}
}
- check(t, 1, 0)
- check(t, 2, 1)
- check(t, 3, 1)
- check(t, 4, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 1, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 2, 1)
t.Run("works the same way after `Reset()`", func(t *testing.T) {
t.Run("same block duration", func(t *testing.T) {
require.NoError(t, bt.Reset())
baseCounter = 0
+ deltaCounter = 0
- check(t, 1, 0)
- check(t, 2, 1)
- check(t, 3, 1)
- check(t, 4, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 1, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 2, 1)
})
t.Run("different block duration", func(t *testing.T) {
blockDur = 3
require.NoError(t, bt.Reset())
baseCounter = 0
+ deltaCounter = 0
- check(t, 1, 0)
- check(t, 2, 0)
- check(t, 3, 1)
- check(t, 4, 1)
- check(t, 5, 1)
- check(t, 6, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 0, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 1, 0)
+ check(t, 5, 1, 0)
+ check(t, 6, 2, 1)
})
})
}
diff --git a/pkg/network/address.go b/pkg/network/address.go
index 4643eef15..88f4a571d 100644
--- a/pkg/network/address.go
+++ b/pkg/network/address.go
@@ -2,12 +2,12 @@ package network
import (
"errors"
+ "fmt"
"net"
"net/url"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@@ -44,9 +44,11 @@ func (a Address) equal(addr Address) bool {
// See also FromString.
func (a Address) URIAddr() string {
_, host, err := manet.DialArgs(a.ma)
- // the only correct way to construct Address is AddressFromString
- // which makes this error appear unexpected
- assert.NoError(err, "could not get host addr")
+ if err != nil {
+ // the only correct way to construct Address is AddressFromString
+ // which makes this error appear unexpected
+ panic(fmt.Errorf("could not get host addr: %w", err))
+ }
if !a.IsTLSEnabled() {
return host
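For context, a sketch of the supported construction path that makes the panic above unreachable (the address string is illustrative):

var a network.Address
if err := a.FromString("/dns4/node.example/tcp/8080"); err != nil {
	return err // malformed input is rejected here, not in URIAddr
}
uri := a.URIAddr() // host:port; a TLS-enabled address yields a grpcs:// form (assumed)
_ = uri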
diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go
index 63ae0bfdb..371d3c76f 100644
--- a/pkg/network/cache/client.go
+++ b/pkg/network/cache/client.go
@@ -5,7 +5,6 @@ import (
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
@@ -26,7 +25,6 @@ type (
Key *ecdsa.PrivateKey
ResponseCallback func(client.ResponseMetaInfo) error
AllowExternal bool
- DialerSource *net.DialerSource
}
)
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 54c1e18fb..9305c143b 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -7,13 +7,11 @@ import (
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"google.golang.org/grpc"
@@ -62,26 +60,18 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address
prmInit.Key = *x.opts.Key
}
- grpcOpts := []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInterceptor(),
- tagging.NewUnaryClientInterceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
- metrics.NewStreamClientInterceptor(),
- tracing.NewStreamClientInterceptor(),
- tagging.NewStreamClientInterceptor(),
- ),
- grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- }
-
prmDial := client.PrmDial{
- Endpoint: addr.URIAddr(),
- GRPCDialOptions: grpcOpts,
+ Endpoint: addr.URIAddr(),
+ GRPCDialOptions: []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ ),
+ },
}
if x.opts.DialTimeout > 0 {
prmDial.DialTimeout = x.opts.DialTimeout
@@ -161,7 +151,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
group.IterateAddresses(func(addr network.Address) bool {
select {
case <-ctx.Done():
- firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled)
+ firstErr = context.Canceled
return true
default:
}
@@ -179,16 +169,15 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
var siErr *objectSDK.SplitInfoError
var eiErr *objectSDK.ECInfoError
- if err != nil {
- err = fmt.Errorf("client connection error at %v: %w", addr, err)
- x.ReportError(err)
- }
-
success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr)
if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) {
firstErr = err
}
+ if err != nil {
+ x.ReportError(err)
+ }
+
return success
})
diff --git a/pkg/network/group.go b/pkg/network/group.go
index 0044fb2d4..a6de0653e 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -3,8 +3,6 @@ package network
import (
"errors"
"fmt"
- "iter"
- "slices"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -69,10 +67,11 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // Addresses must return an iterator over network addresses.
- Addresses() iter.Seq[string]
+ // Must iterate over network addresses and pass each one
+ // to the handler until it returns true.
+ IterateAddresses(func(string) bool)
- // NumberOfAddresses must return number of addresses in group.
+ // Must return the number of addresses in the group.
NumberOfAddresses() int
}
@@ -131,19 +130,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error {
// iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f
// until 1st parsing failure or f's error.
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) {
- for s := range iter.Addresses() {
+ iter.IterateAddresses(func(s string) bool {
var a Address
err = a.FromString(s)
if err != nil {
- return fmt.Errorf("could not parse address from string: %w", err)
+ err = fmt.Errorf("could not parse address from string: %w", err)
+ return true
}
err = f(a)
- if err != nil {
- return err
- }
- }
+
+ return err != nil
+ })
return
}
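The restored form threads the parse error out through the captured err and stops iteration by returning true; the same capture-and-stop pattern in a condensed sketch (validate is illustrative):

var err error
it.IterateAddresses(func(s string) bool {
	if vErr := validate(s); vErr != nil {
		err = vErr
		return true // stop on first failure
	}
	return false // continue
})
return err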
@@ -165,8 +164,10 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
// at least one common address.
func (x AddressGroup) Intersects(x2 AddressGroup) bool {
for i := range x {
- if slices.ContainsFunc(x2, x[i].equal) {
- return true
+ for j := range x2 {
+ if x[i].equal(x2[j]) {
+ return true
+ }
}
}
diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go
index d08264533..5b335fa52 100644
--- a/pkg/network/group_test.go
+++ b/pkg/network/group_test.go
@@ -1,8 +1,6 @@
package network
import (
- "iter"
- "slices"
"sort"
"testing"
@@ -60,8 +58,10 @@ func TestAddressGroup_FromIterator(t *testing.T) {
type testIterator []string
-func (t testIterator) Addresses() iter.Seq[string] {
- return slices.Values(t)
+func (t testIterator) IterateAddresses(f func(string) bool) {
+ for i := range t {
+ f(t[i])
+ }
}
func (t testIterator) NumberOfAddresses() int {
diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go
index 78129bfbe..2144a3001 100644
--- a/pkg/network/transport/accounting/grpc/service.go
+++ b/pkg/network/transport/accounting/grpc/service.go
@@ -3,9 +3,9 @@ package accounting
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
+ accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
- accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc"
)
// Server wraps FrostFS API Accounting service and
diff --git a/pkg/network/transport/apemanager/grpc/service.go b/pkg/network/transport/apemanager/grpc/service.go
index 850d38a65..59783cfc0 100644
--- a/pkg/network/transport/apemanager/grpc/service.go
+++ b/pkg/network/transport/apemanager/grpc/service.go
@@ -3,9 +3,9 @@ package apemanager
import (
"context"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
+ apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc"
apemanager_svc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
- apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
)
type Server struct {
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index 8cbf8d9c3..9fae22b45 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -3,9 +3,9 @@ package container
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
containersvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
)
// Server wraps FrostFS API Container service and
@@ -80,26 +80,3 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
-
-type containerStreamerV2 struct {
- containerGRPC.ContainerService_ListStreamServer
-}
-
-func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error {
- return s.ContainerService_ListStreamServer.Send(
- resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse),
- )
-}
-
-// ListStream converts gRPC ListRequest message and server-side stream and overtakes its data
-// to gRPC stream.
-func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error {
- listReq := new(container.ListStreamRequest)
- if err := listReq.FromGRPCMessage(req); err != nil {
- return err
- }
-
- return s.srv.ListStream(listReq, &containerStreamerV2{
- ContainerService_ListStreamServer: gStream,
- })
-}
diff --git a/pkg/network/transport/netmap/grpc/service.go b/pkg/network/transport/netmap/grpc/service.go
index 4bc3a42f8..406c77e58 100644
--- a/pkg/network/transport/netmap/grpc/service.go
+++ b/pkg/network/transport/netmap/grpc/service.go
@@ -3,9 +3,9 @@ package grpc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
netmapsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
- netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
)
// Server wraps FrostFS API Netmap service and
diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go
index 655b1f9fb..e1655c183 100644
--- a/pkg/network/transport/object/grpc/get.go
+++ b/pkg/network/transport/object/grpc/get.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
)
type getStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/range.go b/pkg/network/transport/object/grpc/range.go
index 7d7ce0e4c..391536e8e 100644
--- a/pkg/network/transport/object/grpc/range.go
+++ b/pkg/network/transport/object/grpc/range.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
)
type getRangeStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go
index 8432707f7..a151ced09 100644
--- a/pkg/network/transport/object/grpc/search.go
+++ b/pkg/network/transport/object/grpc/search.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
)
type searchStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index 15dacd553..d55e3d87f 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -5,10 +5,10 @@ import (
"errors"
"io"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
// Server wraps FrostFS API Object service and
@@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server {
// Patch opens internal Object patch stream and feeds it by the data read from gRPC stream.
func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
- stream, err := s.srv.Patch(gStream.Context())
+ stream, err := s.srv.Patch()
if err != nil {
return err
}
@@ -68,7 +68,7 @@ func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
- stream, err := s.srv.Put(gStream.Context())
+ stream, err := s.srv.Put()
if err != nil {
return err
}
diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go
index 6fce397f3..e0dc74942 100644
--- a/pkg/network/transport/session/grpc/service.go
+++ b/pkg/network/transport/session/grpc/service.go
@@ -3,9 +3,9 @@ package session
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
sessionsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc"
)
// Server wraps FrostFS API Session service and
diff --git a/pkg/network/validation.go b/pkg/network/validation.go
index b5157f28f..92f650119 100644
--- a/pkg/network/validation.go
+++ b/pkg/network/validation.go
@@ -2,7 +2,6 @@ package network
import (
"errors"
- "iter"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -35,8 +34,8 @@ var (
// MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo
-func (x NodeEndpointsIterator) Addresses() iter.Seq[string] {
- return (netmap.NodeInfo)(x).NetworkEndpoints()
+func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) {
+ (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
}
func (x NodeEndpointsIterator) NumberOfAddresses() int {
diff --git a/pkg/services/accounting/executor.go b/pkg/services/accounting/executor.go
index 93e44c52b..b0722cf8a 100644
--- a/pkg/services/accounting/executor.go
+++ b/pkg/services/accounting/executor.go
@@ -4,8 +4,8 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type ServiceExecutor interface {
diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go
index 6c2df8428..ac836b71d 100644
--- a/pkg/services/accounting/morph/executor.go
+++ b/pkg/services/accounting/morph/executor.go
@@ -5,9 +5,9 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
+func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing account")
@@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceReq
return nil, fmt.Errorf("invalid account: %w", err)
}
- amount, err := s.client.BalanceOf(ctx, id)
+ amount, err := s.client.BalanceOf(id)
if err != nil {
return nil, err
}
- balancePrecision, err := s.client.Decimals(ctx)
+ balancePrecision, err := s.client.Decimals()
if err != nil {
return nil, err
}
diff --git a/pkg/services/accounting/server.go b/pkg/services/accounting/server.go
index a280416fb..72833c46c 100644
--- a/pkg/services/accounting/server.go
+++ b/pkg/services/accounting/server.go
@@ -3,7 +3,7 @@ package accounting
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
)
// Server is an interface of the FrostFS API Accounting service server.
diff --git a/pkg/services/accounting/sign.go b/pkg/services/accounting/sign.go
index d8feb76bd..cd6ff0307 100644
--- a/pkg/services/accounting/sign.go
+++ b/pkg/services/accounting/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type signService struct {
diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go
index 61fb025b8..d132ae7db 100644
--- a/pkg/services/apemanager/audit.go
+++ b/pkg/services/apemanager/audit.go
@@ -4,10 +4,10 @@ import (
"context"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
+ ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
- ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
)
var _ Server = (*auditService)(nil)
@@ -33,7 +33,7 @@ func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainReq
return res, err
}
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
+ audit.LogRequest(a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
res.GetBody().GetChainID()),
@@ -49,7 +49,7 @@ func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChain
return res, err
}
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
+ audit.LogRequest(a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
nil),
@@ -65,7 +65,7 @@ func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveCh
return res, err
}
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
+ audit.LogRequest(a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
req.GetBody().GetTarget().GetName(),
req.GetBody().GetChainID()),
diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go
index 1d485321c..e64f9a8d1 100644
--- a/pkg/services/apemanager/errors/errors.go
+++ b/pkg/services/apemanager/errors/errors.go
@@ -9,9 +9,3 @@ func ErrAPEManagerAccessDenied(reason string) error {
err.WriteReason(reason)
return err
}
-
-func ErrAPEManagerInvalidArgument(msg string) error {
- err := new(apistatus.InvalidArgument)
- err.SetMessage(msg)
- return err
-}
diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go
index fc08fe569..25f43486a 100644
--- a/pkg/services/apemanager/executor.go
+++ b/pkg/services/apemanager/executor.go
@@ -8,21 +8,20 @@ import (
"errors"
"fmt"
+ apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape"
+ apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apeV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/ape"
- apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/mr-tron/base58/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
@@ -35,8 +34,6 @@ type cfg struct {
type Service struct {
cfg
- waiter Waiter
-
cnrSrc containercore.Source
contractStorage ape_contract.ProxyAdaptedContractStorage
@@ -44,17 +41,11 @@ type Service struct {
type Option func(*cfg)
-type Waiter interface {
- WaitTxHalt(context.Context, uint32, util.Uint256) error
-}
-
-func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service {
+func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service {
s := &Service{
cnrSrc: cnrSrc,
contractStorage: contractStorage,
-
- waiter: waiter,
}
for i := range opts {
@@ -62,7 +53,7 @@ func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedC
}
if s.log == nil {
- s.log = logger.NewLoggerWrapper(zap.NewNop())
+ s.log = &logger.Logger{Logger: zap.NewNop()}
}
return s
@@ -78,12 +69,12 @@ var _ Server = (*Service)(nil)
// validateContainerTargetRequest validates request for the container target.
// It checks if request actor is the owner of the container, otherwise it denies the request.
-func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error {
+func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error {
var cidSDK cidSDK.ID
if err := cidSDK.DecodeString(cid); err != nil {
- return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err))
+ return fmt.Errorf("invalid CID format: %w", err)
}
- isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey)
+ isOwner, err := s.isActorContainerOwner(cidSDK, pubKey)
if err != nil {
return fmt.Errorf("failed to check owner: %w", err)
}
@@ -93,7 +84,7 @@ func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string
return nil
}
-func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
+func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -101,7 +92,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques
chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw())
if err != nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error())
+ return nil, err
}
if len(chain.ID) == 0 {
const randomIDLength = 10
@@ -117,19 +108,15 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ return nil, fmt.Errorf("unsupported target type: %s", targetType)
}
- txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain)
- if err != nil {
- return nil, err
- }
- if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
+ if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil {
return nil, err
}
@@ -142,7 +129,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques
return resp, nil
}
-func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
+func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -153,19 +140,15 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ return nil, fmt.Errorf("unsupported target type: %s", targetType)
}
- txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID())
- if err != nil {
- return nil, err
- }
- if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
+ if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil {
return nil, err
}
@@ -177,7 +160,7 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain
return resp, nil
}
-func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
+func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
pub, err := getSignaturePublicKey(req.GetVerificationHeader())
if err != nil {
return nil, err
@@ -188,12 +171,12 @@ func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRe
switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
case apeV2.TargetTypeContainer:
reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ if err = s.validateContainerTargetRequest(reqCID, pub); err != nil {
return nil, err
}
target = policy_engine.ContainerTarget(reqCID)
default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ return nil, fmt.Errorf("unsupported target type: %s", targetType)
}
chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target)
@@ -227,23 +210,23 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK
}
sig := vh.GetBodySignature()
if sig == nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error())
+ return nil, errEmptyBodySignature
}
key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
if err != nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err))
+ return nil, fmt.Errorf("invalid signature key: %w", err)
}
return key, nil
}
-func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
+func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- cnr, err := s.cnrSrc.Get(ctx, cid)
+ cnr, err := s.cnrSrc.Get(cid)
if err != nil {
return false, fmt.Errorf("get container error: %w", err)
}
diff --git a/pkg/services/apemanager/server.go b/pkg/services/apemanager/server.go
index e624177ac..90b2d92ae 100644
--- a/pkg/services/apemanager/server.go
+++ b/pkg/services/apemanager/server.go
@@ -3,7 +3,7 @@ package apemanager
import (
"context"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
)
type Server interface {
diff --git a/pkg/services/apemanager/sign.go b/pkg/services/apemanager/sign.go
index a172624ff..eda2a7342 100644
--- a/pkg/services/apemanager/sign.go
+++ b/pkg/services/apemanager/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
)
type signService struct {
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
index eb6263320..278f6da31 100644
--- a/pkg/services/common/ape/checker.go
+++ b/pkg/services/common/ape/checker.go
@@ -1,7 +1,6 @@
package ape
import (
- "context"
"crypto/ecdsa"
"errors"
"fmt"
@@ -12,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
@@ -20,6 +20,7 @@ import (
)
var (
+ errInvalidTargetType = errors.New("bearer token defines non-container target override")
errBearerExpired = errors.New("bearer token has expired")
errBearerInvalidSignature = errors.New("bearer token has invalid signature")
errBearerInvalidContainerID = errors.New("bearer token was created for another container")
@@ -43,12 +44,15 @@ type CheckPrm struct {
// The request's bearer token. It is used in order to check APE overrides with the token.
BearerToken *bearer.Token
+
+ // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
+ SoftAPECheck bool
}
// CheckCore provides methods to perform the common logic of APE check.
type CheckCore interface {
// CheckAPE performs the common policy-engine check logic on a prepared request.
- CheckAPE(ctx context.Context, prm CheckPrm) error
+ CheckAPE(prm CheckPrm) error
}
type checkerCoreImpl struct {
@@ -70,30 +74,22 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora
}
// CheckAPE performs the common policy-engine check logic on a prepared request.
-func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
+func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
var cr policyengine.ChainRouter
- if prm.BearerToken != nil {
+ if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
var err error
if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
return fmt.Errorf("bearer validation error: %w", err)
}
- if prm.BearerToken.Impersonate() {
- cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
- } else {
- override, isSet := prm.BearerToken.APEOverride()
- if !isSet {
- return errors.New("expected for override within bearer")
- }
- cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override)
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
- }
+ cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride())
+ if err != nil {
+ return fmt.Errorf("create chain router error: %w", err)
}
} else {
cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
}
- groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey)
+ groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -108,10 +104,17 @@ func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
if err != nil {
return err
}
- if found && status == apechain.Allow {
+ if !found && prm.SoftAPECheck || status == apechain.Allow {
return nil
}
- return newChainRouterError(prm.Request.Operation(), status)
+ err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String())
+ return apeErr(err)
+}
+
+func apeErr(err error) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(err.Error())
+ return errAccessDenied
}
// isValidBearer checks whether bearer token was correctly signed by authorized
@@ -133,19 +136,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
}
// Check for ape overrides defined in the bearer token.
- if apeOverride, isSet := token.APEOverride(); isSet {
- switch apeOverride.Target.TargetType {
- case ape.TargetTypeContainer:
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !cntID.Equals(targetCnr) {
- return errBearerInvalidContainerID
- }
- default:
- }
+ apeOverride := token.APEOverride()
+ if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
+ return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
+ }
+
+ // Then check that the override's target container matches the container in the request.
+ var targetCnr cid.ID
+ err := targetCnr.DecodeString(apeOverride.Target.Name)
+ if err != nil {
+ return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
+ }
+ if !cntID.Equals(targetCnr) {
+ return errBearerInvalidContainerID
}
// Then check if container owner signed this token.
@@ -157,16 +160,8 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe
var usrSender user.ID
user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
- // Then check if sender is valid. If it is an impersonated token, the sender is set to the token's issuer's
- // public key, but not the actual sender.
- if !token.Impersonate() {
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
- }
- } else {
- if !bearer.ResolveIssuer(*token).Equals(usrSender) {
- return errBearerInvalidOwner
- }
+ if !token.AssertUser(usrSender) {
+ return errBearerInvalidOwner
}
return nil
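A sketch of a call site for the common check with the restored SoftAPECheck flag; field values are placeholders supplied by the caller:

prm := ape.CheckPrm{
	Request:        req,   // aperequest.Request prepared by the service
	Container:      cnrID, // cid.ID of the container being accessed
	ContainerOwner: owner, // user.ID of the container owner
	PublicKey:      pub,   // *keys.PublicKey of the request signer
	SoftAPECheck:   true,  // NoRuleFound is treated as allow
}
if err := checker.CheckAPE(prm); err != nil {
	return err // an apistatus.ObjectAccessDenied carrying the reason
}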
diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go
deleted file mode 100644
index d3c381de7..000000000
--- a/pkg/services/common/ape/error.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package ape
-
-import (
- "fmt"
-
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-)
-
-// ChainRouterError is returned when chain router validation prevents
-// the APE request from being processed (no rule found, access denied, etc.).
-type ChainRouterError struct {
- operation string
- status apechain.Status
-}
-
-func (e *ChainRouterError) Error() string {
- return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status())
-}
-
-func (e *ChainRouterError) Operation() string {
- return e.operation
-}
-
-func (e *ChainRouterError) Status() apechain.Status {
- return e.status
-}
-
-func newChainRouterError(operation string, status apechain.Status) *ChainRouterError {
- return &ChainRouterError{
- operation: operation,
- status: status,
- }
-}
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 3b5dab9aa..d92ecf58b 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -12,14 +12,14 @@ import (
"net"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -49,11 +49,11 @@ var (
)
type ir interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
+ InnerRingKeys() ([][]byte, error)
}
type containers interface {
- Get(context.Context, cid.ID) (*containercore.Container, error)
+ Get(cid.ID) (*containercore.Container, error)
}
type apeChecker struct {
@@ -106,7 +106,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -116,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
if err != nil {
return nil, err
}
@@ -126,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
}
}
- namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
+ namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("could not get owner namespace: %w", err)
}
- if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
+ if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil {
return nil, err
}
@@ -143,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
reqProps,
)
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -175,84 +175,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
return nil, apeErr(nativeschema.MethodListContainers, s)
}
-func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream")
- defer span.End()
-
- role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
- if err != nil {
- return err
- }
-
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
- nativeschema.PropertyKeyActorRole: role,
- }
-
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
- if err != nil {
- return err
- }
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
-
- namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
- if err != nil {
- return fmt.Errorf("could not get owner namespace: %w", err)
- }
- if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
- return err
- }
-
- request := aperequest.NewRequest(
- nativeschema.MethodListContainers,
- aperequest.NewResource(
- resourceName(namespace, ""),
- make(map[string]string),
- ),
- reqProps,
- )
-
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- rt := policyengine.NewRequestTargetWithNamespace(namespace)
- rt.User = &policyengine.Target{
- Type: policyengine.User,
- Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
- }
- rt.Groups = make([]policyengine.Target, len(groups))
- for i := range groups {
- rt.Groups[i] = policyengine.GroupTarget(groups[i])
- }
-
- s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
- if err != nil {
- return err
- }
-
- if found && s == apechain.Allow {
- return ac.next.ListStream(req, stream)
- }
-
- return apeErr(nativeschema.MethodListContainers, s)
-}
-
func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put")
defer span.End()
- role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
if err != nil {
return nil, err
}
@@ -262,7 +189,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
if err != nil {
return nil, err
}
@@ -272,7 +199,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
}
}
- namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID())
+ namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("get namespace error: %w", err)
}
@@ -280,21 +207,16 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, err
}
- cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer())
- if err != nil {
- return nil, fmt.Errorf("get container properties: %w", err)
- }
-
request := aperequest.NewRequest(
nativeschema.MethodPutContainer,
aperequest.NewResource(
resourceName(namespace, ""),
- cnrProps,
+ make(map[string]string),
),
reqProps,
)
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
if err != nil {
return nil, fmt.Errorf("failed to get group ids: %w", err)
}
@@ -326,7 +248,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, apeErr(nativeschema.MethodPutContainer, s)
}
-func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
+func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
if vh == nil {
return "", nil, errMissingVerificationHeader
}
@@ -349,7 +271,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.O
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(ctx, pkBytes)
+ isIR, err := ac.isInnerRingKey(pkBytes)
if err != nil {
return "", nil, err
}
@@ -370,7 +292,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
return err
}
- cont, err := ac.reader.Get(ctx, id)
+ cont, err := ac.reader.Get(id)
if err != nil {
return err
}
@@ -386,7 +308,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
namespace = cntNamespace
}
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
if err != nil {
return fmt.Errorf("failed to get group ids: %w", err)
}
@@ -400,7 +322,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con
op,
aperequest.NewResource(
resourceName(namespace, id.EncodeToString()),
- getContainerProps(cont),
+ ac.getContainerProps(cont),
),
reqProps,
)
@@ -450,26 +372,10 @@ func resourceName(namespace string, container string) string {
return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
}
-func getContainerProps(c *containercore.Container) map[string]string {
- props := map[string]string{
+func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string {
+ return map[string]string{
nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
}
- for attrName, attrVal := range c.Value.Attributes() {
- name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName)
- props[name] = attrVal
- }
- return props
-}
-
-func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) {
- if cnrV2 == nil {
- return nil, errors.New("container is not set")
- }
- c := cnrSDK.Container{}
- if err := c.ReadFromV2(*cnrV2); err != nil {
- return nil, err
- }
- return getContainerProps(&containercore.Container{Value: c}), nil
}
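
Before this change, both Put and the container-bounded checks derived resource properties from the container's full attribute set; the patch narrows them to the owner ID only. For reference, the dropped mapping in one piece (identifiers as in the removed lines above; imports as already present in ape.go):

```go
// containerProps reproduces the removed attribute mapping: the owner ID plus
// one resource property per container attribute, keyed via the
// PropertyKeyFormatContainerAttribute format string. Sketch only.
func containerProps(c *containercore.Container) map[string]string {
	props := map[string]string{
		nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
	}
	for attrName, attrVal := range c.Value.Attributes() {
		props[fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName)] = attrVal
	}
	return props
}
```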
func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
@@ -479,7 +385,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
if err != nil {
return nil, nil, err
}
- role, err := ac.getRole(ctx, actor, pk, cont, cnrID)
+ role, err := ac.getRole(actor, pk, cont, cnrID)
if err != nil {
return nil, nil, err
}
@@ -487,7 +393,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
nativeschema.PropertyKeyActorRole: role,
}
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
if err != nil {
return nil, nil, err
}
@@ -499,13 +405,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe
return reqProps, pk, nil
}
-func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
+func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
if cont.Value.Owner().Equals(*actor) {
return nativeschema.PropertyValueContainerRoleOwner, nil
}
pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(ctx, pkBytes)
+ isIR, err := ac.isInnerRingKey(pkBytes)
if err != nil {
return "", err
}
@@ -513,7 +419,7 @@ func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.Publ
return nativeschema.PropertyValueContainerRoleIR, nil
}
- isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont)
+ isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont)
if err != nil {
return "", err
}
@@ -607,8 +513,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) {
- innerRingKeys, err := ac.ir.InnerRingKeys(ctx)
+func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) {
+ innerRingKeys, err := ac.ir.InnerRingKeys()
if err != nil {
return false, err
}
@@ -622,47 +528,50 @@ func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, erro
return false, nil
}
-func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
+func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
binCnrID := make([]byte, sha256.Size)
cnrID.Encode(binCnrID)
- nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm)
+ nm, err := netmap.GetLatestNetworkMap(ac.nm)
if err != nil {
return false, err
}
- if isContainerNode(nm, pk, binCnrID, cont) {
+ in, err := isContainerNode(nm, pk, binCnrID, cont)
+ if err != nil {
+ return false, err
+ } else if in {
return true, nil
}
	// Then check the previous netmap: this can happen in between epoch changes,
	// when a node migrates data from the last epoch's container.
- nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm)
+ nm, err = netmap.GetPreviousNetworkMap(ac.nm)
if err != nil {
return false, err
}
- return isContainerNode(nm, pk, binCnrID, cont), nil
+ return isContainerNode(nm, pk, binCnrID, cont)
}
-func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
- // It could be an error only if the network map doesn't have enough nodes to
- // fulfil the policy. It's a logical error that doesn't affect actor role
- // determination, so we ignore it.
- cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
+func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) (bool, error) {
+ cnrVectors, err := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
+ if err != nil {
+ return false, err
+ }
for i := range cnrVectors {
for j := range cnrVectors[i] {
if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
- return true
+ return true, nil
}
}
}
- return false
+ return false, nil
}
-func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -670,19 +579,24 @@ func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID)
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr := ownerSDK.ScriptHash()
+ addr, err := ownerSDK.ScriptHash()
+ if err != nil {
+ return "", err
+ }
namespace := ""
- subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
+ subject, err := ac.frostFSIDClient.GetSubject(addr)
if err == nil {
namespace = subject.Namespace
- } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
- return "", fmt.Errorf("get subject error: %w", err)
+ } else {
+ if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
+ return "", fmt.Errorf("get subject error: %w", err)
+ }
}
return namespace, nil
}
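
namespaceByOwner treats a missing frostfsid subject as the empty (root) namespace and fails on any other error; classification is by substring match because the contract client returns plain errors. The same decision isolated below; subjectGetter is a hypothetical local interface matching the client used here, and imports are as already present in ape.go:

```go
type subjectGetter interface {
	GetSubject(util.Uint160) (*client.Subject, error)
}

// subjectNamespace maps "subject not found" to the root namespace and
// propagates every other failure. Sketch of the branch above.
func subjectNamespace(c subjectGetter, addr util.Uint160) (string, error) {
	subject, err := c.GetSubject(addr)
	if err != nil {
		if strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
			return "", nil // unknown subject: fall back to the root namespace
		}
		return "", fmt.Errorf("get subject error: %w", err)
	}
	return subject.Namespace, nil
}
```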
-func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
+func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) {
var ownerSDK user.ID
if owner == nil {
return "", errOwnerIDIsNotSet
@@ -690,8 +604,11 @@ func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.Own
if err := ownerSDK.ReadFromV2(*owner); err != nil {
return "", err
}
- addr := ownerSDK.ScriptHash()
- subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
+ addr, err := ownerSDK.ScriptHash()
+ if err != nil {
+ return "", err
+ }
+ subject, err := ac.frostFSIDClient.GetSubject(addr)
if err != nil {
return "", fmt.Errorf("get subject error: %w", err)
}
@@ -725,12 +642,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro
// validateNamespaceByPublicKey validates that the namespace of the request actor equals the owner's namespace.
// An actor's namespace is calculated by a public key.
-func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error {
+func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error {
var actor user.ID
user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
actorOwnerID := new(refs.OwnerID)
actor.WriteToV2(actorOwnerID)
- actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID)
+ actorNamespace, err := ac.namespaceByOwner(actorOwnerID)
if err != nil {
return fmt.Errorf("could not get actor namespace: %w", err)
}
@@ -741,11 +658,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
+func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index 6438c34ca..d6f9b75ef 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -9,13 +9,13 @@ import (
"net"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -54,8 +54,6 @@ func TestAPE(t *testing.T) {
t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
- t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr)
- t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr)
}
const (
@@ -566,185 +564,6 @@ func testDenyGetContainerByIP(t *testing.T) {
require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
}
-func testDenyGetContainerSysZoneAttr(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindResource,
- Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
- Value: "eggplant",
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
- require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
-}
-
-func testDenyPutContainerSysZoneAttr(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
- owner := testContainer.Owner()
- ownerAddr := owner.ScriptHash()
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- ownerAddr: {},
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- ownerAddr: {},
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodPutContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- nativeschema.ResourceFormatRootContainers,
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindResource,
- Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
- Value: "eggplant",
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initPutRequest(t, testContainer)
-
- resp, err := apeSrv.Put(ctxWithPeerInfo(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
- require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
-}
-
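
The two tests removed above pinned down deny rules conditioned on a container attribute (the system zone). For reference, the shape of the rule they installed, using only identifiers from the removed code:

```go
// denyGetByZone builds a chain denying MethodGetContainer whenever the
// container's system zone attribute equals zone. Mirrors the rules the
// removed tests added; sketch only.
func denyGetByZone(contID, zone string) *chain.Chain {
	return &chain.Chain{
		Rules: []chain.Rule{{
			Status:  chain.AccessDenied,
			Actions: chain.Actions{Names: []string{nativeschema.MethodGetContainer}},
			Resources: chain.Resources{Names: []string{
				fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID),
			}},
			Condition: []chain.Condition{{
				Kind:  chain.KindResource,
				Key:   fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
				Value: zone,
				Op:    chain.CondStringEquals,
			}},
		}},
	}
}
```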
func testDenyGetContainerByGroupID(t *testing.T) {
t.Parallel()
srv := &srvStub{
@@ -859,7 +678,8 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
testContainer := containertest.Container()
owner := testContainer.Owner()
- ownerAddr := owner.ScriptHash()
+ ownerAddr, err := owner.ScriptHash()
+ require.NoError(t, err)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
ownerAddr: {},
@@ -870,7 +690,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
nm.currentEpoch = 100
nm.netmaps = map[uint64]*netmap.NetMap{}
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
Rules: []chain.Rule{
{
Status: chain.AccessDenied,
@@ -953,7 +773,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -1037,7 +857,7 @@ func testDenyPutContainerInvalidNamespace(t *testing.T) {
require.NoError(t, err)
req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(t, testContainer)
frostfsIDSubjectReader := &frostfsidStub{
subjects: map[util.Uint160]*client.Subject{
@@ -1259,11 +1079,6 @@ func (s *srvStub) List(context.Context, *container.ListRequest) (*container.List
return &container.ListResponse{}, nil
}
-func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error {
- s.calls["ListStream"]++
- return nil
-}
-
func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) {
s.calls["Put"]++
return &container.PutResponse{}, nil
@@ -1273,7 +1088,7 @@ type irStub struct {
keys [][]byte
}
-func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
+func (s *irStub) InnerRingKeys() ([][]byte, error) {
return s.keys, nil
}
@@ -1281,7 +1096,7 @@ type containerStub struct {
c map[cid.ID]*containercore.Container
}
-func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) {
+func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) {
if v, ok := s.c[id]; ok {
return v, nil
}
@@ -1293,21 +1108,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+ return s.GetNetMapByEpoch(s.currentEpoch - diff)
}
-func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
+func (s *netmapStub) Epoch() (uint64, error) {
return s.currentEpoch, nil
}
@@ -1316,7 +1131,7 @@ type frostfsidStub struct {
subjectsExt map[util.Uint160]*client.SubjectExtended
}
-func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) {
+func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) {
s, ok := f.subjects[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1324,7 +1139,7 @@ func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*cl
return s, nil
}
-func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsidStub) GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) {
s, ok := f.subjectsExt[owner]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -1712,21 +1527,26 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put
return req
}
-func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 {
+func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 {
var ownerSDK *user.ID
owner := testContainer.Owner()
ownerSDK = &owner
- return ownerSDK.ScriptHash()
+ sc, err := ownerSDK.ScriptHash()
+ require.NoError(t, err)
+ return sc
}
func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) {
var actorUserID user.ID
user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey()))
- actorScriptHash = actorUserID.ScriptHash()
+ var err error
+ actorScriptHash, err = actorUserID.ScriptHash()
+ require.NoError(t, err)
var ownerUserID user.ID
user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey()))
- ownerScriptHash = ownerUserID.ScriptHash()
+ ownerScriptHash, err = ownerUserID.ScriptHash()
+ require.NoError(t, err)
require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String())
return
}
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index b235efa3c..b257272f5 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -4,10 +4,10 @@ import (
"context"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ container_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- container_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -35,7 +35,7 @@ func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest)
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
@@ -47,7 +47,7 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
@@ -58,29 +58,18 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_List_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
return res, err
}
-// ListStream implements Server.
-func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- err := a.next.ListStream(req, stream)
- if !a.enabled.Load() {
- return err
- }
- audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
- return err
-}
-
// Put implements Server.
func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
res, err := a.next.Put(ctx, req)
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req,
+ audit.LogRequest(a.log, container_grpc.ContainerService_Put_FullMethodName, req,
audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index cdd0d2514..0917e3bd0 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -4,9 +4,9 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type ServiceExecutor interface {
@@ -14,7 +14,6 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
- ListStream(context.Context, *container.ListStreamRequest, ListStream) error
}
type executorSvc struct {
@@ -94,11 +93,3 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
s.respSvc.SetMeta(resp)
return resp, nil
}
-
-func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- err := s.exec.ListStream(stream.Context(), req, stream)
- if err != nil {
- return fmt.Errorf("could not execute ListStream request: %w", err)
- }
- return nil
-}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index eaa608eba..05d8749cf 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -5,11 +5,11 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -25,20 +25,20 @@ type morphExecutor struct {
// Reader is an interface of read-only container storage.
type Reader interface {
containercore.Source
+ containercore.EACLSource
// ContainersOf returns a list of container identifiers belonging
 // to the specified user of the FrostFS system. Returns the identifiers
 // of all FrostFS containers if the pointer to the owner identifier is nil.
- ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
- IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
+ ContainersOf(*user.ID) ([]cid.ID, error)
}
// Writer is an interface of container storage updater.
type Writer interface {
// Put stores specified container in the side chain.
- Put(context.Context, containercore.Container) (*cid.ID, error)
+ Put(containercore.Container) (*cid.ID, error)
// Delete removes specified container from the side chain.
- Delete(context.Context, containercore.RemovalWitness) error
+ Delete(containercore.RemovalWitness) error
}
func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
@@ -48,7 +48,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
+func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
sigV2 := body.GetSignature()
if sigV2 == nil {
// TODO(@cthulhu-rider): #468 use "const" error
@@ -81,7 +81,7 @@ func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *c
}
}
- idCnr, err := s.wrt.Put(ctx, cnr)
+ idCnr, err := s.wrt.Put(cnr)
if err != nil {
return nil, err
}
@@ -95,7 +95,7 @@ func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *c
return res, nil
}
-func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
+func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -125,7 +125,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body
rmWitness.Signature = body.GetSignature()
rmWitness.SessionToken = tok
- err = s.wrt.Delete(ctx, rmWitness)
+ err = s.wrt.Delete(rmWitness)
if err != nil {
return nil, err
}
@@ -133,7 +133,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body
return new(container.DeleteResponseBody), nil
}
-func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
+func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -146,7 +146,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- cnr, err := s.rdr.Get(ctx, id)
+ cnr, err := s.rdr.Get(id)
if err != nil {
return nil, err
}
@@ -173,7 +173,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
return res, nil
}
-func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
+func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errMissingUserID
@@ -186,7 +186,7 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return nil, fmt.Errorf("invalid user ID: %w", err)
}
- cnrs, err := s.rdr.ContainersOf(ctx, &id)
+ cnrs, err := s.rdr.ContainersOf(&id)
if err != nil {
return nil, err
}
@@ -201,56 +201,3 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return res, nil
}
-
-func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error {
- body := req.GetBody()
- idV2 := body.GetOwnerID()
- if idV2 == nil {
- return errMissingUserID
- }
-
- var id user.ID
-
- err := id.ReadFromV2(*idV2)
- if err != nil {
- return fmt.Errorf("invalid user ID: %w", err)
- }
-
- resBody := new(container.ListStreamResponseBody)
- r := new(container.ListStreamResponse)
- r.SetBody(resBody)
-
- var cidList []refs.ContainerID
-
- // Number of containers to send at once.
- const batchSize = 1000
-
- processCID := func(id cid.ID) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var refID refs.ContainerID
- id.WriteToV2(&refID)
- cidList = append(cidList, refID)
- if len(cidList) == batchSize {
- r.GetBody().SetContainerIDs(cidList)
- cidList = cidList[:0]
- return stream.Send(r)
- }
- return nil
- }
-
- if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil {
- return err
- }
-
- if len(cidList) > 0 {
- r.GetBody().SetContainerIDs(cidList)
- return stream.Send(r)
- }
-
- return nil
-}
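
The deleted executor accumulates container IDs during iteration and emits a response every 1000 entries, flushing the partial tail at the end. The pattern in isolation, as a generic sketch that is not part of the patch:

```go
// batcher collects pushed items and invokes send once size is reached;
// Flush emits any remaining tail. Mirrors the accumulate-and-send loop of
// the removed ListStream executor (size 1000 there).
type batcher[T any] struct {
	buf  []T
	size int
	send func([]T) error
}

func (b *batcher[T]) Push(v T) error {
	b.buf = append(b.buf, v)
	if len(b.buf) == b.size {
		return b.Flush()
	}
	return nil
}

func (b *batcher[T]) Flush() error {
	if len(b.buf) == 0 {
		return nil
	}
	err := b.send(b.buf) // send must not retain buf: it is reused
	b.buf = b.buf[:0]
	return err
}
```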
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index 1f6fdb0be..560c69232 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -4,12 +4,12 @@ import (
"context"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
containerSvcMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test"
@@ -24,11 +24,15 @@ type mock struct {
containerSvcMorph.Reader
}
-func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) {
+func (m mock) Put(_ containerCore.Container) (*cid.ID, error) {
return new(cid.ID), nil
}
-func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error {
+func (m mock) Delete(_ containerCore.RemovalWitness) error {
+ return nil
+}
+
+func (m mock) PutEACL(_ containerCore.EACL) error {
return nil
}
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index d9208077d..a19d83c56 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -3,8 +3,7 @@ package container
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
)
// Server is an interface of the FrostFS API Container service server.
@@ -13,11 +12,4 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
- ListStream(*container.ListStreamRequest, ListStream) error
-}
-
-// ListStream is an interface of FrostFS API v2 compatible search streamer.
-type ListStream interface {
- util.ServerStream
- Send(*container.ListStreamResponse) error
}
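
Server in this package is implemented as a stack of thin decorators (auditService, signService, and, before this patch, the transport splitter further below), each forwarding to a wrapped next. A stripped-down sketch of the shared pattern; loggingServer is hypothetical, uses the standard library log package, and only one method is shown, the rest delegate identically:

```go
// loggingServer wraps another Server and records the outcome of each call.
type loggingServer struct {
	next Server
}

func (s *loggingServer) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
	resp, err := s.next.Get(ctx, req)
	log.Printf("container.Get: ok=%t", err == nil) // hypothetical logging
	return resp, err
}
```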
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index 85fe7ae87..f7f5d6486 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
type signService struct {
@@ -56,40 +56,3 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
return resp, s.sigSvc.SignResponse(resp, err)
}
-
-func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(container.ListStreamResponse)
- _ = s.sigSvc.SignResponse(resp, err)
- return stream.Send(resp)
- }
-
- ss := &listStreamSigner{
- ListStream: stream,
- sigSvc: s.sigSvc,
- }
- err := s.svc.ListStream(req, ss)
- if err != nil || !ss.nonEmptyResp {
- return ss.send(new(container.ListStreamResponse), err)
- }
- return nil
-}
-
-type listStreamSigner struct {
- ListStream
- sigSvc *util.SignService
-
- nonEmptyResp bool // set on first Send call
-}
-
-func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error {
- s.nonEmptyResp = true
- return s.send(resp, nil)
-}
-
-func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error {
- if err := s.sigSvc.SignResponse(resp, err); err != nil {
- return err
- }
- return s.ListStream.Send(resp)
-}
diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go
deleted file mode 100644
index 4f8708da7..000000000
--- a/pkg/services/container/transport_splitter.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package container
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
-)
-
-type (
- TransportSplitter struct {
- next Server
-
- respSvc *response.Service
- cnrAmount uint32
- }
-
- listStreamMsgSizeCtrl struct {
- util.ServerStream
- stream ListStream
- respSvc *response.Service
- cnrAmount uint32
- }
-)
-
-func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server {
- return &TransportSplitter{
- next: next,
- respSvc: respSvc,
- cnrAmount: cnrAmount,
- }
-}
-
-func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
- return s.next.Put(ctx, req)
-}
-
-func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
- return s.next.Delete(ctx, req)
-}
-
-func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
- return s.next.Get(ctx, req)
-}
-
-func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
- return s.next.List(ctx, req)
-}
-
-func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- return s.next.ListStream(req, &listStreamMsgSizeCtrl{
- ServerStream: stream,
- stream: stream,
- respSvc: s.respSvc,
- cnrAmount: s.cnrAmount,
- })
-}
-
-func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error {
- s.respSvc.SetMeta(resp)
- body := resp.GetBody()
- ids := body.GetContainerIDs()
-
- var newResp *container.ListStreamResponse
-
- for {
- if newResp == nil {
- newResp = new(container.ListStreamResponse)
- newResp.SetBody(body)
- }
-
- cut := min(s.cnrAmount, uint32(len(ids)))
-
- body.SetContainerIDs(ids[:cut])
- newResp.SetMetaHeader(resp.GetMetaHeader())
- newResp.SetVerificationHeader(resp.GetVerificationHeader())
-
- if err := s.stream.Send(newResp); err != nil {
- return fmt.Errorf("TransportSplitter: %w", err)
- }
-
- ids = ids[cut:]
-
- if len(ids) == 0 {
- break
- }
- }
-
- return nil
-}
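
The deleted splitter's Send re-slices one large ID list into responses of at most cnrAmount entries before forwarding each one. The same chunking, standalone (min is the Go 1.21 builtin, as in the removed code; n is assumed positive, as cnrAmount was):

```go
// chunkIDs splits ids into consecutive slices of at most n elements — the
// re-slicing the removed listStreamMsgSizeCtrl.Send performed per forwarded
// response. Sketch only; assumes n > 0.
func chunkIDs[T any](ids []T, n int) [][]T {
	var out [][]T
	for len(ids) > 0 {
		cut := min(n, len(ids))
		out = append(out, ids[:cut])
		ids = ids[cut:]
	}
	return out
}
```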
diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go
index 37daf67be..fd6f020d1 100644
--- a/pkg/services/control/convert.go
+++ b/pkg/services/control/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
)
type requestWrapper struct {
diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go
index 024676b87..c892c5b6c 100644
--- a/pkg/services/control/ir/convert.go
+++ b/pkg/services/control/ir/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
)
type requestWrapper struct {
diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go
index 62f800d99..0c9400f6c 100644
--- a/pkg/services/control/ir/rpc.go
+++ b/pkg/services/control/ir/rpc.go
@@ -1,9 +1,9 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
)
const serviceName = "ircontrol.ControlService"
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
index d9f65a2fc..9f7a8b879 100644
--- a/pkg/services/control/ir/server/audit.go
+++ b/pkg/services/control/ir/server/audit.go
@@ -6,10 +6,10 @@ import (
"strings"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -36,7 +36,7 @@ func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheck
if !a.enabled.Load() {
return res, err
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
+ audit.LogRequestWithKey(a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
return res, err
}
@@ -79,7 +79,7 @@ func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveC
}
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
+ audit.LogRequestWithKey(a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
return res, err
}
@@ -90,7 +90,7 @@ func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRe
return res, err
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
return res, err
}
@@ -102,7 +102,7 @@ func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequ
return res, err
}
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
+ audit.LogRequestWithKey(a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
nil, err == nil)
return res, err
}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 0509d2646..2447a8a74 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -5,10 +5,10 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"google.golang.org/grpc/codes"
@@ -40,7 +40,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
// TickEpoch forces a new epoch.
//
// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -48,12 +48,12 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (
resp := new(control.TickEpochResponse)
resp.SetBody(new(control.TickEpochResponse_Body))
- epoch, err := s.netmapClient.Epoch(ctx)
+ epoch, err := s.netmapClient.Epoch()
if err != nil {
return nil, fmt.Errorf("getting current epoch: %w", err)
}
- vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
+ vub, err := s.netmapClient.NewEpochControl(epoch+1, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing new epoch: %w", err)
}
@@ -69,7 +69,7 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (
// RemoveNode forces a node removal.
//
// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -77,7 +77,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
resp := new(control.RemoveNodeResponse)
resp.SetBody(new(control.RemoveNodeResponse_Body))
- nm, err := s.netmapClient.NetMap(ctx)
+ nm, err := s.netmapClient.NetMap()
if err != nil {
return nil, fmt.Errorf("getting netmap: %w", err)
}
@@ -91,11 +91,11 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
if len(nodeInfo.PublicKey()) == 0 {
return nil, status.Error(codes.NotFound, "no such node")
}
- if nodeInfo.Status().IsOffline() {
+ if nodeInfo.IsOffline() {
return nil, status.Error(codes.FailedPrecondition, "node is already offline")
}
- vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
+ vub, err := s.netmapClient.ForceRemovePeer(nodeInfo, req.GetBody().GetVub())
if err != nil {
return nil, fmt.Errorf("forcing node removal: %w", err)
}
@@ -109,7 +109,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
}
// RemoveContainer forces a container removal.
-func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
+func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -124,7 +124,7 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain
return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
}
var err error
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -138,13 +138,13 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain
return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
}
- cids, err := s.containerClient.ContainersOf(ctx, &owner)
+ cids, err := s.containerClient.ContainersOf(&owner)
if err != nil {
return nil, fmt.Errorf("failed to get owner's containers: %w", err)
}
for _, containerID := range cids {
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
+ vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
if err != nil {
return nil, err
}
@@ -162,13 +162,13 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain
return resp, nil
}
-func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
+func (s *Server) removeContainer(containerID cid.ID, vub uint32) (uint32, error) {
var prm container.DeletePrm
prm.SetCID(containerID[:])
prm.SetControlTX(true)
prm.SetVUB(vub)
- vub, err := s.containerClient.Delete(ctx, prm)
+ vub, err := s.containerClient.Delete(prm)
if err != nil {
return 0, fmt.Errorf("forcing container removal: %w", err)
}
diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go
index 9d5cfefc8..0c2de5300 100644
--- a/pkg/services/control/ir/server/deps.go
+++ b/pkg/services/control/ir/server/deps.go
@@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // HealthStatus must calculate and return current health status of the IR application.
+ // Must calculate and return current health status of the IR application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
index 0cfca71c1..c2a4f88a6 100644
--- a/pkg/services/control/ir/server/server.go
+++ b/pkg/services/control/ir/server/server.go
@@ -35,7 +35,8 @@ func panicOnPrmValue(n string, v any) {
// the parameterized private key.
func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server {
// verify required parameters
- if prm.healthChecker == nil {
+ switch {
+ case prm.healthChecker == nil:
panicOnPrmValue("health checker", prm.healthChecker)
}
diff --git a/pkg/services/control/ir/server/sign.go b/pkg/services/control/ir/server/sign.go
index d39f6d5f9..f72d51f9e 100644
--- a/pkg/services/control/ir/server/sign.go
+++ b/pkg/services/control/ir/server/sign.go
@@ -6,8 +6,8 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go
index d27746263..66d196617 100644
--- a/pkg/services/control/ir/service_frostfs.pb.go
+++ b/pkg/services/control/ir/service_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -233,25 +233,14 @@ func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -386,22 +375,11 @@ func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"healthStatus\":"
- out.RawString(prefix)
- v := int32(x.HealthStatus)
- if vv, ok := HealthStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ const prefix string = ",\"healthStatus\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.HealthStatus))
}
out.RawByte('}')
}
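
The regenerated marshalers drop the per-field first flag: every prefix constant now embeds the separating comma, and only the first written field slices it off with prefix[1:]. This is safe because the generator emits fields in a fixed order, so exactly one field needs the comma dropped. The emitted shape, reduced to two fields (fragment; jwriter API as imported in this file):

```go
out.RawByte('{')
{
	const prefix string = ",\"body\":"
	out.RawString(prefix[1:]) // first field: strip the leading comma
	x.Body.MarshalEasyJSON(out)
}
{
	const prefix string = ",\"signature\":"
	out.RawString(prefix) // subsequent fields keep the comma
	x.Signature.MarshalEasyJSON(out)
}
out.RawByte('}')
```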
@@ -586,25 +564,14 @@ func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -739,16 +706,10 @@ func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -782,15 +743,7 @@ func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Vub = f
}
}
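
On the decode side, the regeneration replaces the manual json.Number-plus-strconv parse with the lexer's typed reader, which performs the same range-checked conversion and records failures on the lexer itself. Side by side (fragment; in is the *jlexer.Lexer of the surrounding function):

```go
// Before (removed): parse by hand and report the error through the lexer.
r := in.JsonNumber()
v, err := strconv.ParseUint(r.String(), 10, 32)
if err != nil {
	in.AddError(err)
	return
}
x.Vub = uint32(v)

// After (generated): the typed reader does the range-checked parse and
// records any error on the lexer.
x.Vub = in.Uint32()
```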
@@ -926,25 +879,14 @@ func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1079,16 +1021,10 @@ func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -1122,15 +1058,7 @@ func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Vub = f
}
}
@@ -1266,25 +1194,14 @@ func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1439,29 +1356,14 @@ func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
+ const prefix string = ",\"vub\":"
out.RawString(prefix)
out.Uint32(x.Vub)
}
@@ -1496,27 +1398,13 @@ func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Key = f
}
case "vub":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Vub = f
}
}
@@ -1652,25 +1540,14 @@ func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1805,16 +1682,10 @@ func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -1848,15 +1719,7 @@ func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Vub = f
}
}
@@ -1992,25 +1855,14 @@ func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2185,43 +2037,19 @@ func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"owner\":"
+ const prefix string = ",\"owner\":"
out.RawString(prefix)
- if x.Owner != nil {
- out.Base64Bytes(x.Owner)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Owner)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
+ const prefix string = ",\"vub\":"
out.RawString(prefix)
out.Uint32(x.Vub)
}
@@ -2256,39 +2084,19 @@ func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "owner":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Owner = f
}
case "vub":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Vub = f
}
}
@@ -2424,25 +2232,14 @@ func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2577,16 +2374,10 @@ func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
out.Uint32(x.Vub)
}
out.RawByte('}')
@@ -2620,15 +2411,7 @@ func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "vub":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Vub = f
}
}
@@ -2764,25 +2547,14 @@ func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
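Review note: the marshaler rewrite repeated throughout this file swaps the runtime `first` flag for a comma-prefixed constant: the first field writes `prefix[1:]` to drop the comma, every later field writes the full `prefix`. A standalone sketch of the emitted shape, assuming stock easyjson jwriter:

```go
package main

import (
	"fmt"

	jwriter "github.com/mailru/easyjson/jwriter"
)

// marshalBody mirrors the generated pattern: the separator is baked into each
// prefix constant instead of being tracked with a boolean at run time.
func marshalBody(out *jwriter.Writer, key []byte, vub uint32) {
	out.RawByte('{')
	{
		const prefix string = ",\"key\":"
		out.RawString(prefix[1:]) // first field: strip the leading comma
		out.Base64Bytes(key)
	}
	{
		const prefix string = ",\"vub\":"
		out.RawString(prefix) // later fields: keep the comma
		out.Uint32(vub)
	}
	out.RawByte('}')
}

func main() {
	var w jwriter.Writer
	marshalBody(&w, []byte{0x01}, 42)
	buf, _ := w.BuildBytes()
	fmt.Println(string(buf)) // {"key":"AQ==","vub":42}
}
```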
diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go
index 407eec6ad..b230726a9 100644
--- a/pkg/services/control/ir/types_frostfs.pb.go
+++ b/pkg/services/control/ir/types_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -155,35 +155,16 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
- if x.Sign != nil {
- out.Base64Bytes(x.Sign)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Sign)
}
out.RawByte('}')
}
@@ -216,25 +197,13 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Key = f
}
case "signature":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Sign = f
}
}
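Review note: the unmarshal side of this file makes the mirror-image simplification: `f = in.Bytes()` replaces the block that normalized empty slices to nil. If I read jlexer correctly, an empty base64 string now decodes to an empty-but-non-nil slice, which matters to callers that compare against nil; a quick probe of that assumption:

```go
package main

import (
	"fmt"

	jlexer "github.com/mailru/easyjson/jlexer"
)

func main() {
	// The removed code did: tmp := in.Bytes(); if len(tmp) == 0 { tmp = nil }.
	// Without it, "" decodes to a zero-length slice that is not nil.
	in := jlexer.Lexer{Data: []byte(`""`)}
	b := in.Bytes()
	fmt.Println(len(b), b == nil) // 0 false (the old code would have given 0 true)
}
```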
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 0c4236d0e..80aece008 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -1,10 +1,8 @@
package control
import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
)
const serviceName = "control.ControlService"
@@ -17,6 +15,7 @@ const (
rpcListShards = "ListShards"
rpcSetShardMode = "SetShardMode"
rpcSynchronizeTree = "SynchronizeTree"
+ rpcEvacuateShard = "EvacuateShard"
rpcStartShardEvacuation = "StartShardEvacuation"
rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus"
@@ -32,7 +31,6 @@ const (
rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
rpcDetachShards = "DetachShards"
rpcStartShardRebuild = "StartShardRebuild"
- rpcListShardsForObject = "ListShardsForObject"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -76,7 +74,6 @@ func SetNetmapStatus(
// GetNetmapStatus executes ControlService.GetNetmapStatus RPC.
func GetNetmapStatus(
- _ context.Context,
cli *client.Client,
req *GetNetmapStatusRequest,
opts ...client.CallOption,
@@ -165,6 +162,19 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl
return wResp.message, nil
}
+// EvacuateShard executes ControlService.EvacuateShard RPC.
+func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) {
+ wResp := newResponseWrapper[EvacuateShardResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
// StartShardEvacuation executes ControlService.StartShardEvacuation RPC.
func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) {
wResp := newResponseWrapper[StartShardEvacuationResponse]()
@@ -282,7 +292,7 @@ func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverride
return wResp.message, nil
}
-// GetChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
+// GetChainLocalOverride executes ControlService.GetChainLocalOverride RPC.
func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) {
wResp := newResponseWrapper[GetChainLocalOverrideResponse]()
wReq := &requestWrapper{m: req}
@@ -365,22 +375,3 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts .
return wResp.message, nil
}
-
-// ListShardsForObject executes ControlService.ListShardsForObject RPC.
-func ListShardsForObject(
- cli *client.Client,
- req *ListShardsForObjectRequest,
- opts ...client.CallOption,
-) (*ListShardsForObjectResponse, error) {
- wResp := newResponseWrapper[ListShardsForObjectResponse]()
-
- wReq := &requestWrapper{
- m: req,
- }
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
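Review note: the `EvacuateShard` helper re-added above is the same unary-call template every function in rpc.go hand-expands: wrap the request, allocate a typed response wrapper, `client.SendUnary`, unwrap. A self-contained sketch of that shape with stub types (the stubs are illustrative stand-ins, not this package's real wrappers):

```go
package main

import (
	"errors"
	"fmt"
)

const serviceName = "control.ControlService"

// stubClient stands in for the SDK client used by rpc.go.
type stubClient struct {
	handlers map[string]func(any) (any, error)
}

func (c *stubClient) sendUnary(method string, req any) (any, error) {
	h, ok := c.handlers[serviceName+"/"+method]
	if !ok {
		return nil, errors.New("unknown method")
	}
	return h(req)
}

// callUnary is the generic core of the template; rpc.go writes this out by
// hand once per RPC instead.
func callUnary[Resp any](c *stubClient, method string, req any) (*Resp, error) {
	out, err := c.sendUnary(method, req)
	if err != nil {
		return nil, err
	}
	resp, ok := out.(*Resp)
	if !ok {
		return nil, errors.New("unexpected response type")
	}
	return resp, nil
}

type evacuateShardResponse struct{ count uint32 }

func main() {
	c := &stubClient{handlers: map[string]func(any) (any, error){
		serviceName + "/EvacuateShard": func(_ any) (any, error) {
			return &evacuateShardResponse{count: 7}, nil
		},
	}}
	resp, err := callUnary[evacuateShardResponse](c, "EvacuateShard", struct{}{})
	fmt.Println(resp.count, err) // 7 <nil>
}
```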
diff --git a/pkg/services/control/server/ctrlmessage/sign.go b/pkg/services/control/server/ctrlmessage/sign.go
index d9d5c5f5e..31425b337 100644
--- a/pkg/services/control/server/ctrlmessage/sign.go
+++ b/pkg/services/control/server/ctrlmessage/sign.go
@@ -4,8 +4,8 @@ import (
"crypto/ecdsa"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go
index ffd36962b..a4111bddb 100644
--- a/pkg/services/control/server/detach_shards.go
+++ b/pkg/services/control/server/detach_shards.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
+func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -19,7 +19,7 @@ func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequ
shardIDs := s.getShardIDList(req.GetBody().GetShard_ID())
- if err := s.s.DetachShards(ctx, shardIDs); err != nil {
+ if err := s.s.DetachShards(shardIDs); err != nil {
if errors.As(err, new(logicerr.Logical)) {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
new file mode 100644
index 000000000..ae3413373
--- /dev/null
+++ b/pkg/services/control/server/evacuate.go
@@ -0,0 +1,188 @@
+package control
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
+
+func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ prm := engine.EvacuateShardPrm{
+ ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ ObjectsHandler: s.replicateObject,
+ Scope: engine.EvacuateScopeObjects,
+ }
+
+ res, err := s.s.Evacuate(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.EvacuateShardResponse{
+ Body: &control.EvacuateShardResponse_Body{
+ Count: uint32(res.ObjectsEvacuated()),
+ },
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ cid, ok := obj.ContainerID()
+ if !ok {
+ // Return nil to prevent situations where a shard can't be evacuated
+ // because of a single bad/corrupted object.
+ return false, nil
+ }
+
+ nodes, err := s.getContainerNodes(cid)
+ if err != nil {
+ return false, err
+ }
+
+ if len(nodes) == 0 {
+ return false, nil
+ }
+
+ var res replicatorResult
+ task := replicator.Task{
+ NumCopies: 1,
+ Addr: addr,
+ Obj: obj,
+ Nodes: nodes,
+ }
+ s.replicator.HandleReplicationTask(ctx, task, &res)
+
+ if res.count == 0 {
+ return false, errors.New("object was not replicated")
+ }
+ return true, nil
+}
+
+func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
+ nodes, err := s.getContainerNodes(contID)
+ if err != nil {
+ return false, "", err
+ }
+ if len(nodes) == 0 {
+ return false, "", nil
+ }
+
+ for _, node := range nodes {
+ err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
+ if err == nil {
+ return true, hex.EncodeToString(node.PublicKey()), nil
+ }
+ }
+ return false, "", err
+}
+
+func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
+ rawCID := make([]byte, sha256.Size)
+ contID.Encode(rawCID)
+
+ var height uint64
+ for {
+ op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
+ if err != nil {
+ return err
+ }
+
+ if op.Time == 0 {
+ return nil
+ }
+
+ req := &tree.ApplyRequest{
+ Body: &tree.ApplyRequest_Body{
+ ContainerId: rawCID,
+ TreeId: treeID,
+ Operation: &tree.LogMove{
+ ParentId: op.Parent,
+ Meta: op.Meta.Bytes(),
+ ChildId: op.Child,
+ },
+ },
+ }
+
+ err = tree.SignMessage(req, s.key)
+ if err != nil {
+ return fmt.Errorf("can't message apply request: %w", err)
+ }
+
+ err = s.treeService.ReplicateTreeOp(ctx, node, req)
+ if err != nil {
+ return err
+ }
+
+ height = op.Time + 1
+ }
+}
+
+func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
+ nm, err := s.netMapSrc.GetNetMap(0)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := s.cnrSrc.Get(contID)
+ if err != nil {
+ return nil, err
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ contID.Encode(binCnr)
+
+ ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
+ if err != nil {
+ return nil, errFailedToBuildListOfContainerNodes
+ }
+
+ nodes := placement.FlattenNodes(ns)
+ bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
+ for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
+ if bytes.Equal(nodes[i].PublicKey(), bs) {
+ copy(nodes[i:], nodes[i+1:])
+ nodes = nodes[:len(nodes)-1]
+ }
+ }
+ return nodes, nil
+}
+
+type replicatorResult struct {
+ count int
+}
+
+// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
+ r.count++
+}
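Review note: one detail in `getContainerNodes` above deserves a gloss. The node removes itself from the flattened placement list with the copy-and-truncate idiom, and deliberately avoids `range` because the slice shrinks mid-loop. The file's loop does not step `i` back after a removal, which is fine there since a public key appears at most once; the sketch below adds the decrement so the idiom also survives duplicates:

```go
package main

import (
	"bytes"
	"fmt"
)

// dropKey deletes every slice element equal to self, in place.
func dropKey(keys [][]byte, self []byte) [][]byte {
	for i := 0; i < len(keys); i++ { // index loop: len(keys) shrinks below
		if bytes.Equal(keys[i], self) {
			copy(keys[i:], keys[i+1:]) // shift the tail left over position i
			keys = keys[:len(keys)-1]  // drop the now-duplicated last element
			i--                        // re-examine the element shifted into i
		}
	}
	return keys
}

func main() {
	keys := [][]byte{{1}, {2}, {2}, {3}}
	fmt.Println(len(dropKey(keys, []byte{2}))) // 2
}
```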
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index f3ba9015e..aacebe9e3 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -1,32 +1,17 @@
package control
import (
- "bytes"
"context"
- "crypto/sha256"
- "encoding/hex"
"errors"
- "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
-var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
-
func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) {
err := s.isValidRequest(req)
if err != nil {
@@ -38,17 +23,16 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
}
prm := engine.EvacuateShardPrm{
- ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- ObjectsHandler: s.replicateObject,
- TreeHandler: s.replicateTree,
- Scope: engine.EvacuateScope(req.GetBody().GetScope()),
- ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
- ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
- RepOneOnly: req.GetBody().GetRepOneOnly(),
+ ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ ObjectsHandler: s.replicateObject,
+ TreeHandler: s.replicateTree,
+ Async: true,
+ Scope: engine.EvacuateScope(req.GetBody().GetScope()),
}
- if err = s.s.Evacuate(ctx, prm); err != nil {
+ _, err = s.s.Evacuate(ctx, prm)
+ if err != nil {
var logicalErr logicerr.Logical
if errors.As(err, &logicalErr) {
return nil, status.Error(codes.Aborted, err.Error())
@@ -148,133 +132,3 @@ func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.Re
}
return resp, nil
}
-
-func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- cid, ok := obj.ContainerID()
- if !ok {
- // Return nil to prevent situations where a shard can't be evacuated
- // because of a single bad/corrupted object.
- return false, nil
- }
-
- nodes, err := s.getContainerNodes(ctx, cid)
- if err != nil {
- return false, err
- }
-
- if len(nodes) == 0 {
- return false, nil
- }
-
- var res replicatorResult
- task := replicator.Task{
- NumCopies: 1,
- Addr: addr,
- Obj: obj,
- Nodes: nodes,
- }
- s.replicator.HandleReplicationTask(ctx, task, &res)
-
- if res.count == 0 {
- return false, errors.New("object was not replicated")
- }
- return true, nil
-}
-
-func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
- nodes, err := s.getContainerNodes(ctx, contID)
- if err != nil {
- return false, "", err
- }
- if len(nodes) == 0 {
- return false, "", nil
- }
-
- for _, node := range nodes {
- err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
- if err == nil {
- return true, hex.EncodeToString(node.PublicKey()), nil
- }
- }
- return false, "", err
-}
-
-func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
- rawCID := make([]byte, sha256.Size)
- contID.Encode(rawCID)
-
- var height uint64
- for {
- op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
- if err != nil {
- return err
- }
-
- if op.Time == 0 {
- return nil
- }
-
- req := &tree.ApplyRequest{
- Body: &tree.ApplyRequest_Body{
- ContainerId: rawCID,
- TreeId: treeID,
- Operation: &tree.LogMove{
- ParentId: op.Parent,
- Meta: op.Bytes(),
- ChildId: op.Child,
- },
- },
- }
-
- err = tree.SignMessage(req, s.key)
- if err != nil {
- return fmt.Errorf("can't message apply request: %w", err)
- }
-
- err = s.treeService.ReplicateTreeOp(ctx, node, req)
- if err != nil {
- return err
- }
-
- height = op.Time + 1
- }
-}
-
-func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) {
- nm, err := s.netMapSrc.GetNetMap(ctx, 0)
- if err != nil {
- return nil, err
- }
-
- c, err := s.cnrSrc.Get(ctx, contID)
- if err != nil {
- return nil, err
- }
-
- binCnr := make([]byte, sha256.Size)
- contID.Encode(binCnr)
-
- ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
- if err != nil {
- return nil, errFailedToBuildListOfContainerNodes
- }
-
- nodes := placement.FlattenNodes(ns)
- bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
- if bytes.Equal(nodes[i].PublicKey(), bs) {
- copy(nodes[i:], nodes[i+1:])
- nodes = nodes[:len(nodes)-1]
- }
- }
- return nodes, nil
-}
-
-type replicatorResult struct {
- count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
- r.count++
-}
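Review note: the tree-replication half moved out of this file replays pilorama's operation log by logical time: fetch the first op at or after `height`, apply it remotely, advance to `op.Time + 1`, and treat `Time == 0` as end-of-log. A toy model of that cursor loop (the `opLog` type is a stand-in for the real `pilorama.Forest` interface):

```go
package main

import "fmt"

type op struct{ Time uint64 }

type opLog []op

// getFrom mimics TreeGetOpLog: first operation with Time >= height,
// or the zero op when the log is exhausted.
func (l opLog) getFrom(height uint64) op {
	for _, o := range l {
		if o.Time >= height {
			return o
		}
	}
	return op{}
}

func main() {
	log := opLog{{Time: 3}, {Time: 5}, {Time: 9}}
	applied := 0
	for height := uint64(0); ; {
		o := log.getFrom(height)
		if o.Time == 0 {
			break // zero Time marks the end of the log
		}
		applied++ // stand-in for signing and sending an ApplyRequest
		height = o.Time + 1
	}
	fmt.Println(applied) // 3
}
```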
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index a8ef7809e..d9fefc38e 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -42,7 +42,8 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
prm.WithForceRemoval()
prm.WithAddress(addrList[i])
- if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
+ _, err := s.s.Delete(ctx, prm)
+ if err != nil && firstErr == nil {
firstErr = err
}
}
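Review note: the small change to `DropObjects` above keeps its error policy intact: keep deleting after a failure, report only the first error at the end. A compact stand-in for the pattern (`errors.Join` would be the alternative if every failure mattered):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	results := []error{nil, errors.New("shard busy"), errors.New("not found")}

	var firstErr error
	for _, err := range results {
		if err != nil && firstErr == nil {
			firstErr = err // remember the first failure, keep going
		}
	}
	fmt.Println(firstErr) // shard busy
}
```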
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
index 5e0496910..1c038253a 100644
--- a/pkg/services/control/server/get_netmap_status.go
+++ b/pkg/services/control/server/get_netmap_status.go
@@ -10,12 +10,12 @@ import (
)
// GetNetmapStatus gets node status in FrostFS network.
-func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
+func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
- st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
+ st, epoch, err := s.nodeState.GetNetmapStatus()
if err != nil {
return nil, err
}
diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go
deleted file mode 100644
index 39565ed50..000000000
--- a/pkg/services/control/server/list_shards_for_object.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- var obj oid.ID
- err = obj.DecodeString(req.GetBody().GetObjectId())
- if err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
-
- var cnr cid.ID
- err = cnr.DecodeString(req.GetBody().GetContainerId())
- if err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
-
- resp := new(control.ListShardsForObjectResponse)
- body := new(control.ListShardsForObjectResponse_Body)
- resp.SetBody(body)
-
- var objAddr oid.Address
- objAddr.SetContainer(cnr)
- objAddr.SetObject(obj)
- info, err := s.s.ListShardsForObject(ctx, objAddr)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- if len(info) == 0 {
- return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject)
- }
-
- body.SetShard_ID(shardInfoToProto(info))
-
- // Sign the response
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func shardInfoToProto(infos []shard.Info) [][]byte {
- shardInfos := make([][]byte, 0, len(infos))
- for _, info := range infos {
- shardInfos = append(shardInfos, *info.ID)
- }
-
- return shardInfos
-}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index 59d701bc6..f3fe56a46 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -1,7 +1,6 @@
package control
import (
- "context"
"crypto/ecdsa"
"sync/atomic"
@@ -27,13 +26,13 @@ type Server struct {
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // NetmapStatus must calculate and return current status of the node in FrostFS network map.
+ // Must calculate and return current status of the node in FrostFS network map.
//
// If status can not be calculated for any reason,
// control.netmapStatus_STATUS_UNDEFINED should be returned.
NetmapStatus() control.NetmapStatus
- // HealthStatus must calculate and return current health status of the node application.
+ // Must calculate and return current health status of the node application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
@@ -46,13 +45,13 @@ type NodeState interface {
//
// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
// in the network settings, the node additionally starts local maintenance.
- SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
+ SetNetmapStatus(st control.NetmapStatus) error
// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
// but starts local maintenance regardless of the network settings.
- ForceMaintenance(ctx context.Context) error
+ ForceMaintenance() error
- GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
+ GetNetmapStatus() (control.NetmapStatus, uint64, error)
}
// LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
index 529041dca..3fd69df12 100644
--- a/pkg/services/control/server/set_netmap_status.go
+++ b/pkg/services/control/server/set_netmap_status.go
@@ -12,7 +12,7 @@ import (
// SetNetmapStatus sets node status in FrostFS network.
//
// If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
+func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
// verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -29,9 +29,9 @@ func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStat
"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
}
- err = s.nodeState.ForceMaintenance(ctx)
+ err = s.nodeState.ForceMaintenance()
} else {
- err = s.nodeState.SetNetmapStatus(ctx, st)
+ err = s.nodeState.SetNetmapStatus(st)
}
if err != nil {
diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go
index 4f8796263..52835c41d 100644
--- a/pkg/services/control/server/set_shard_mode.go
+++ b/pkg/services/control/server/set_shard_mode.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
+func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
// verify request
err := s.isValidRequest(req)
if err != nil {
@@ -38,7 +38,7 @@ func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequ
}
for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
- err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter())
+ err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/sign.go b/pkg/services/control/server/sign.go
index 0e8e24b6e..514af273f 100644
--- a/pkg/services/control/server/sign.go
+++ b/pkg/services/control/server/sign.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
)
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 4c539acfc..04994328a 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -30,6 +30,11 @@ service ControlService {
// Synchronizes all log operations for the specified tree.
rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
+ // EvacuateShard moves all data from one shard to the others.
+ // Deprecated: Use
+ // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation instead.
+ rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse);
+
// StartShardEvacuation starts moving all data from one shard to the others.
rpc StartShardEvacuation(StartShardEvacuationRequest)
returns (StartShardEvacuationResponse);
@@ -89,9 +94,6 @@ service ControlService {
// StartShardRebuild starts shard rebuild process.
rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
-
- // ListShardsForObject returns shard info where object is stored.
- rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
}
// Health check request.
@@ -392,12 +394,6 @@ message StartShardEvacuationRequest {
bool ignore_errors = 2;
// Evacuation scope.
uint32 scope = 3;
- // Count of concurrent container evacuation workers.
- uint32 container_worker_count = 4;
- // Count of concurrent object evacuation workers.
- uint32 object_worker_count = 5;
- // Choose for evacuation objects in `REP 1` containers only.
- bool rep_one_only = 6;
}
Body body = 1;
@@ -732,23 +728,3 @@ message StartShardRebuildResponse {
Signature signature = 2;
}
-
-message ListShardsForObjectRequest {
- message Body {
- string object_id = 1;
- string container_id = 2;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-message ListShardsForObjectResponse {
- message Body {
- // List of the node's shards storing object.
- repeated bytes shard_ID = 1;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
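Review note: dropping fields 4-6 from `StartShardEvacuationRequest.Body` stays wire-compatible because protobuf parsers skip unknown tags, which is exactly what the generated `UnmarshalProtobuf` loop further down in service_frostfs.pb.go does by having no error path for them. (Standard proto hygiene would also `reserved 4, 5, 6;` the tags so they are never reused with a different meaning; the patch does not do this.) A small demonstration of the skipping behaviour, using `protowire` as a stand-in for the easyproto loop and assuming well-formed input:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode what an older peer might send: scope (tag 3) plus the removed
	// container_worker_count (tag 4).
	var b []byte
	b = protowire.AppendTag(b, 3, protowire.VarintType)
	b = protowire.AppendVarint(b, 2)
	b = protowire.AppendTag(b, 4, protowire.VarintType)
	b = protowire.AppendVarint(b, 8)

	var scope uint64
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		switch num {
		case 3:
			v, vn := protowire.ConsumeVarint(b)
			scope, b = v, b[vn:]
		default:
			// Unknown tags (4-6 from older peers) are skipped, not rejected.
			b = b[protowire.ConsumeFieldValue(num, typ, b):]
		}
	}
	fmt.Println(scope) // 2
}
```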
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 44849d591..019cac290 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -233,25 +233,14 @@ func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -406,37 +395,16 @@ func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"netmapStatus\":"
- out.RawString(prefix)
- v := int32(x.NetmapStatus)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ const prefix string = ",\"netmapStatus\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.NetmapStatus))
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"healthStatus\":"
+ const prefix string = ",\"healthStatus\":"
out.RawString(prefix)
- v := int32(x.HealthStatus)
- if vv, ok := HealthStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ out.Int32(int32(x.HealthStatus))
}
out.RawByte('}')
}
@@ -643,25 +611,14 @@ func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -816,30 +773,14 @@ func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"status\":"
- out.RawString(prefix)
- v := int32(x.Status)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ const prefix string = ",\"status\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Status))
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"forceMaintenance\":"
+ const prefix string = ",\"forceMaintenance\":"
out.RawString(prefix)
out.Bool(x.ForceMaintenance)
}
@@ -1032,25 +973,14 @@ func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1324,25 +1254,14 @@ func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1616,25 +1535,14 @@ func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1789,34 +1697,16 @@ func (x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"status\":"
- out.RawString(prefix)
- v := int32(x.Status)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ const prefix string = ",\"status\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Status))
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"epoch\":"
+ const prefix string = ",\"epoch\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
- out.RawByte('"')
+ out.Uint64(x.Epoch)
}
out.RawByte('}')
}
@@ -1871,15 +1761,7 @@ func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "epoch":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.Epoch = f
}
}
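Review note: the hunk above changes the JSON shape of `epoch`: the old code wrote it as a quoted decimal string, the new one as a bare number via `out.Uint64`. That is a visible API change for consumers, and the quoting was not pointless: uint64 values above 2^53 cannot be represented exactly as JavaScript numbers. Both encodings side by side, assuming stock easyjson jwriter:

```go
package main

import (
	"fmt"
	"strconv"

	jwriter "github.com/mailru/easyjson/jwriter"
)

func main() {
	epoch := uint64(1) << 60 // above 2^53: not exact as a JS number
	var was, now jwriter.Writer

	// Old generated code: quoted decimal string.
	was.RawByte('"')
	was.Buffer.Buf = strconv.AppendUint(was.Buffer.Buf, epoch, 10)
	was.RawByte('"')

	// New generated code: bare JSON number.
	now.Uint64(epoch)

	a, _ := was.BuildBytes()
	b, _ := now.BuildBytes()
	fmt.Println(string(a), string(b)) // "1152921504606846976" 1152921504606846976
}
```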
@@ -2015,25 +1897,14 @@ func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2168,26 +2039,16 @@ func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"addressList\":"
- out.RawString(prefix)
+ const prefix string = ",\"addressList\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.AddressList {
if i != 0 {
out.RawByte(',')
}
- if x.AddressList[i] != nil {
- out.Base64Bytes(x.AddressList[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.AddressList[i])
}
out.RawByte(']')
}
@@ -2225,13 +2086,7 @@ func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -2371,25 +2226,14 @@ func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2663,25 +2507,14 @@ func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2955,25 +2788,14 @@ func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3114,16 +2936,10 @@ func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shards\":"
- out.RawString(prefix)
+ const prefix string = ",\"shards\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shards {
if i != 0 {
@@ -3308,25 +3124,14 @@ func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3501,51 +3306,26 @@ func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"mode\":"
+ const prefix string = ",\"mode\":"
out.RawString(prefix)
- v := int32(x.Mode)
- if vv, ok := ShardMode_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ out.Int32(int32(x.Mode))
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"resetErrorCounter\":"
+ const prefix string = ",\"resetErrorCounter\":"
out.RawString(prefix)
out.Bool(x.ResetErrorCounter)
}
@@ -3583,13 +3363,7 @@ func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -3757,25 +3531,14 @@ func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4049,25 +3812,14 @@ func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4242,43 +3994,21 @@ func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"height\":"
+ const prefix string = ",\"height\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
- out.RawByte('"')
+ out.Uint64(x.Height)
}
out.RawByte('}')
}
@@ -4311,13 +4041,7 @@ func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -4329,15 +4053,7 @@ func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "height":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.Height = f
}
}
@@ -4473,25 +4189,14 @@ func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4765,25 +4470,14 @@ func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4938,36 +4632,21 @@ func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ignoreErrors\":"
+ const prefix string = ",\"ignoreErrors\":"
out.RawString(prefix)
out.Bool(x.IgnoreErrors)
}
@@ -5005,13 +4684,7 @@ func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -5157,25 +4830,14 @@ func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5310,16 +4972,10 @@ func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"count\":"
- out.RawString(prefix)
+ const prefix string = ",\"count\":"
+ out.RawString(prefix[1:])
out.Uint32(x.Count)
}
out.RawByte('}')
@@ -5353,15 +5009,7 @@ func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "count":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Count = f
}
}
@@ -5497,25 +5145,14 @@ func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5670,36 +5307,21 @@ func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"seal\":"
+ const prefix string = ",\"seal\":"
out.RawString(prefix)
out.Bool(x.Seal)
}
@@ -5737,13 +5359,7 @@ func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -5889,25 +5505,14 @@ func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6181,25 +5786,14 @@ func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6354,25 +5948,14 @@ func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"concurrency\":"
- out.RawString(prefix)
+ const prefix string = ",\"concurrency\":"
+ out.RawString(prefix[1:])
out.Uint32(x.Concurrency)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"removeDuplicates\":"
+ const prefix string = ",\"removeDuplicates\":"
out.RawString(prefix)
out.Bool(x.RemoveDuplicates)
}
@@ -6407,15 +5990,7 @@ func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "concurrency":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Concurrency = f
}
case "removeDuplicates":
@@ -6557,25 +6132,14 @@ func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6849,25 +6413,14 @@ func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6958,12 +6511,9 @@ func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool {
}
type StartShardEvacuationRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- IgnoreErrors bool `json:"ignoreErrors"`
- Scope uint32 `json:"scope"`
- ContainerWorkerCount uint32 `json:"containerWorkerCount"`
- ObjectWorkerCount uint32 `json:"objectWorkerCount"`
- RepOneOnly bool `json:"repOneOnly"`
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Scope uint32 `json:"scope"`
}
var (
@@ -6983,9 +6533,6 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
size += proto.UInt32Size(3, x.Scope)
- size += proto.UInt32Size(4, x.ContainerWorkerCount)
- size += proto.UInt32Size(5, x.ObjectWorkerCount)
- size += proto.BoolSize(6, x.RepOneOnly)
return size
}
@@ -7011,15 +6558,6 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar
if x.Scope != 0 {
mm.AppendUint32(3, x.Scope)
}
- if x.ContainerWorkerCount != 0 {
- mm.AppendUint32(4, x.ContainerWorkerCount)
- }
- if x.ObjectWorkerCount != 0 {
- mm.AppendUint32(5, x.ObjectWorkerCount)
- }
- if x.RepOneOnly {
- mm.AppendBool(6, x.RepOneOnly)
- }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -7049,24 +6587,6 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er
return fmt.Errorf("cannot unmarshal field %s", "Scope")
}
x.Scope = data
- case 4: // ContainerWorkerCount
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount")
- }
- x.ContainerWorkerCount = data
- case 5: // ObjectWorkerCount
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
- }
- x.ObjectWorkerCount = data
- case 6: // RepOneOnly
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly")
- }
- x.RepOneOnly = data
}
}
return nil
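
Dropping fields 4-6 here is wire-compatible in one direction: easyproto's
NextField consumes each field before the switch dispatches on its number, so a
payload from a peer that still sets ContainerWorkerCount, ObjectWorkerCount, or
RepOneOnly decodes without error and the extra values are silently discarded.
A compact sketch of that skip behaviour, assuming VictoriaMetrics/easyproto
(the message shape is hypothetical):

package sketch

import (
	"fmt"

	"github.com/VictoriaMetrics/easyproto"
)

// decodeKnown reads only field 1 and ignores every other field number,
// mirroring how the generated UnmarshalProtobuf above treats fields 4-6.
func decodeKnown(src []byte) (uint32, error) {
	var fc easyproto.FieldContext
	var scope uint32
	for len(src) > 0 {
		var err error
		src, err = fc.NextField(src) // consumes the field even when ignored
		if err != nil {
			return 0, fmt.Errorf("cannot read next field: %w", err)
		}
		if fc.FieldNum == 1 {
			v, ok := fc.Uint32()
			if !ok {
				return 0, fmt.Errorf("cannot unmarshal field %s", "Scope")
			}
			scope = v
		}
	}
	return scope, nil
}
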
@@ -7098,33 +6618,6 @@ func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) {
x.Scope = v
}
-func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 {
- if x != nil {
- return x.ContainerWorkerCount
- }
- return 0
-}
-func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) {
- x.ContainerWorkerCount = v
-}
-func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
- if x != nil {
- return x.ObjectWorkerCount
- }
- return 0
-}
-func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
- x.ObjectWorkerCount = v
-}
-func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool {
- if x != nil {
- return x.RepOneOnly
- }
- return false
-}
-func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) {
- x.RepOneOnly = v
-}
// MarshalJSON implements the json.Marshaler interface.
func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
@@ -7137,79 +6630,29 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ignoreErrors\":"
+ const prefix string = ",\"ignoreErrors\":"
out.RawString(prefix)
out.Bool(x.IgnoreErrors)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"scope\":"
+ const prefix string = ",\"scope\":"
out.RawString(prefix)
out.Uint32(x.Scope)
}
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerWorkerCount\":"
- out.RawString(prefix)
- out.Uint32(x.ContainerWorkerCount)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"objectWorkerCount\":"
- out.RawString(prefix)
- out.Uint32(x.ObjectWorkerCount)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"repOneOnly\":"
- out.RawString(prefix)
- out.Bool(x.RepOneOnly)
- }
out.RawByte('}')
}
@@ -7244,13 +6687,7 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -7266,51 +6703,9 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "scope":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Scope = f
}
- case "containerWorkerCount":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.ContainerWorkerCount = f
- }
- case "objectWorkerCount":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.ObjectWorkerCount = f
- }
- case "repOneOnly":
- {
- var f bool
- f = in.Bool()
- x.RepOneOnly = f
- }
}
in.WantComma()
}
@@ -7444,25 +6839,14 @@ func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7736,25 +7120,14 @@ func (x *StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -8028,25 +7401,14 @@ func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -8216,19 +7578,11 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(ou
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"value\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Value, 10)
- out.RawByte('"')
+ const prefix string = ",\"value\":"
+ out.RawString(prefix[1:])
+ out.Int64(x.Value)
}
out.RawByte('}')
}
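
Note that this hunk changes the JSON shape, not just the code: canonical
protobuf JSON renders 64-bit integers as quoted strings, which is what the
removed strconv.AppendInt-between-quotes form produced, while out.Int64 emits a
bare number, so {"value":"123"} becomes {"value":123}. A sketch of the two
forms, assuming easyjson's jwriter:

package sketch

import (
	"strconv"

	"github.com/mailru/easyjson/jwriter"
)

// asString renders v the old way, as a quoted decimal string: "123".
func asString(out *jwriter.Writer, v int64) {
	out.RawByte('"')
	out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, v, 10)
	out.RawByte('"')
}

// asNumber renders v the new way, as a bare JSON number: 123.
func asNumber(out *jwriter.Writer, v int64) {
	out.Int64(v)
}
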
@@ -8261,15 +7615,7 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON(
case "value":
{
var f int64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := int64(v)
- f = pv
+ f = in.Int64()
x.Value = f
}
}
@@ -8361,19 +7707,11 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jw
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"seconds\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Seconds, 10)
- out.RawByte('"')
+ const prefix string = ",\"seconds\":"
+ out.RawString(prefix[1:])
+ out.Int64(x.Seconds)
}
out.RawByte('}')
}
@@ -8406,15 +7744,7 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *j
case "seconds":
{
var f int64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := int64(v)
- f = pv
+ f = in.Int64()
x.Seconds = f
}
}
@@ -8732,157 +8062,73 @@ func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Wri
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"totalObjects\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalObjects, 10)
- out.RawByte('"')
+ const prefix string = ",\"totalObjects\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.TotalObjects)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"evacuatedObjects\":"
+ const prefix string = ",\"evacuatedObjects\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedObjects, 10)
- out.RawByte('"')
+ out.Uint64(x.EvacuatedObjects)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"failedObjects\":"
+ const prefix string = ",\"failedObjects\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedObjects, 10)
- out.RawByte('"')
+ out.Uint64(x.FailedObjects)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
+ const prefix string = ",\"shardID\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"status\":"
+ const prefix string = ",\"status\":"
out.RawString(prefix)
- v := int32(x.Status)
- if vv, ok := GetShardEvacuationStatusResponse_Body_Status_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ out.Int32(int32(x.Status))
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"duration\":"
+ const prefix string = ",\"duration\":"
out.RawString(prefix)
x.Duration.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"startedAt\":"
+ const prefix string = ",\"startedAt\":"
out.RawString(prefix)
x.StartedAt.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"errorMessage\":"
+ const prefix string = ",\"errorMessage\":"
out.RawString(prefix)
out.String(x.ErrorMessage)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"skippedObjects\":"
+ const prefix string = ",\"skippedObjects\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.SkippedObjects, 10)
- out.RawByte('"')
+ out.Uint64(x.SkippedObjects)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"totalTrees\":"
+ const prefix string = ",\"totalTrees\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalTrees, 10)
- out.RawByte('"')
+ out.Uint64(x.TotalTrees)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"evacuatedTrees\":"
+ const prefix string = ",\"evacuatedTrees\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedTrees, 10)
- out.RawByte('"')
+ out.Uint64(x.EvacuatedTrees)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"failedTrees\":"
+ const prefix string = ",\"failedTrees\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedTrees, 10)
- out.RawByte('"')
+ out.Uint64(x.FailedTrees)
}
out.RawByte('}')
}
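
The status field likewise loses its symbolic form: the removed writer consulted
the generated _name map and emitted the enum's string name when known, falling
back to a number, whereas the new writer always emits the raw int32, so
consumers matching on names must switch to numeric values. A sketch of the two
behaviours (the map values below are illustrative stand-ins):

package sketch

import "github.com/mailru/easyjson/jwriter"

// statusName stands in for GetShardEvacuationStatusResponse_Body_Status_name.
var statusName = map[int32]string{0: "EVACUATE_SHARD_STATUS_UNDEFINED", 1: "RUNNING", 2: "COMPLETED"}

// writeStatusOld emits the symbolic name when known, as the removed code did.
func writeStatusOld(out *jwriter.Writer, v int32) {
	if name, ok := statusName[v]; ok {
		out.String(name)
		return
	}
	out.Int32(v)
}

// writeStatusNew always emits the numeric value, as the added code does.
func writeStatusNew(out *jwriter.Writer, v int32) {
	out.Int32(v)
}
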
@@ -8915,43 +8161,19 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex
case "totalObjects":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.TotalObjects = f
}
case "evacuatedObjects":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.EvacuatedObjects = f
}
case "failedObjects":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.FailedObjects = f
}
case "shardID":
@@ -8960,13 +8182,7 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -9018,57 +8234,25 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex
case "skippedObjects":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.SkippedObjects = f
}
case "totalTrees":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.TotalTrees = f
}
case "evacuatedTrees":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.EvacuatedTrees = f
}
case "failedTrees":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.FailedTrees = f
}
}
@@ -9204,25 +8388,14 @@ func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -9496,25 +8669,14 @@ func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -9788,25 +8950,14 @@ func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10080,25 +9231,14 @@ func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10372,25 +9512,14 @@ func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10548,31 +9677,16 @@ func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
x.Target.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chain\":"
+ const prefix string = ",\"chain\":"
out.RawString(prefix)
- if x.Chain != nil {
- out.Base64Bytes(x.Chain)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Chain)
}
out.RawByte('}')
}
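
Another repeated surface change: the removed writer special-cased a nil byte
slice as "", while easyjson's Base64Bytes encodes a nil slice as null and an
empty non-nil slice as "". On the read side, the removed wrapper also
normalised empty decodes back to nil, which plain in.Bytes() no longer does. A
small demonstration, assuming easyjson's jwriter:

package main

import (
	"fmt"

	"github.com/mailru/easyjson/jwriter"
)

func render(b []byte) string {
	var w jwriter.Writer
	w.Base64Bytes(b)
	return string(w.Buffer.BuildBytes())
}

func main() {
	fmt.Println(render(nil))          // null  (new output for an absent chain)
	fmt.Println(render([]byte{}))     // ""
	fmt.Println(render([]byte{0xff})) // "/w=="
}
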
@@ -10612,13 +9726,7 @@ func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chain":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Chain = f
}
}
@@ -10754,25 +9862,14 @@ func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -10907,21 +10004,11 @@ func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainId\":"
- out.RawString(prefix)
- if x.ChainId != nil {
- out.Base64Bytes(x.ChainId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"chainId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ChainId)
}
out.RawByte('}')
}
@@ -10954,13 +10041,7 @@ func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chainId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ChainId = f
}
}
@@ -11096,25 +10177,14 @@ func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -11272,31 +10342,16 @@ func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
x.Target.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainId\":"
+ const prefix string = ",\"chainId\":"
out.RawString(prefix)
- if x.ChainId != nil {
- out.Base64Bytes(x.ChainId)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.ChainId)
}
out.RawByte('}')
}
@@ -11336,13 +10391,7 @@ func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chainId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ChainId = f
}
}
@@ -11478,25 +10527,14 @@ func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -11631,21 +10669,11 @@ func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chain\":"
- out.RawString(prefix)
- if x.Chain != nil {
- out.Base64Bytes(x.Chain)
- } else {
- out.String("")
- }
+ const prefix string = ",\"chain\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Chain)
}
out.RawByte('}')
}
@@ -11678,13 +10706,7 @@ func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer)
case "chain":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Chain = f
}
}
@@ -11820,25 +10842,14 @@ func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -11976,16 +10987,10 @@ func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Write
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
x.Target.MarshalEasyJSON(out)
}
out.RawByte('}')
@@ -12156,25 +11161,14 @@ func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -12309,26 +11303,16 @@ func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writ
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chains\":"
- out.RawString(prefix)
+ const prefix string = ",\"chains\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Chains {
if i != 0 {
out.RawByte(',')
}
- if x.Chains[i] != nil {
- out.Base64Bytes(x.Chains[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Chains[i])
}
out.RawByte(']')
}
@@ -12366,13 +11350,7 @@ func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexe
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -12512,25 +11490,14 @@ func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -12665,16 +11632,10 @@ func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Wri
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainName\":"
- out.RawString(prefix)
+ const prefix string = ",\"chainName\":"
+ out.RawString(prefix[1:])
out.String(x.ChainName)
}
out.RawByte('}')
@@ -12844,25 +11805,14 @@ func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -13003,16 +11953,10 @@ func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Wr
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"targets\":"
- out.RawString(prefix)
+ const prefix string = ",\"targets\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Targets {
if i != 0 {
@@ -13197,25 +12141,14 @@ func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -13373,31 +12306,16 @@ func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writ
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
x.Target.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainId\":"
+ const prefix string = ",\"chainId\":"
out.RawString(prefix)
- if x.ChainId != nil {
- out.Base64Bytes(x.ChainId)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.ChainId)
}
out.RawByte('}')
}
@@ -13437,13 +12355,7 @@ func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexe
case "chainId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ChainId = f
}
}
@@ -13579,25 +12491,14 @@ func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -13871,25 +12772,14 @@ func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer)
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -14027,16 +12917,10 @@ func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwr
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
x.Target.MarshalEasyJSON(out)
}
out.RawByte('}')
@@ -14207,25 +13091,14 @@ func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter.
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -14499,25 +13372,14 @@ func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -14732,66 +13594,36 @@ func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ignoreErrors\":"
+ const prefix string = ",\"ignoreErrors\":"
out.RawString(prefix)
out.Bool(x.IgnoreErrors)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"async\":"
+ const prefix string = ",\"async\":"
out.RawString(prefix)
out.Bool(x.Async)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"restoreMode\":"
+ const prefix string = ",\"restoreMode\":"
out.RawString(prefix)
out.Bool(x.RestoreMode)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shrink\":"
+ const prefix string = ",\"shrink\":"
out.RawString(prefix)
out.Bool(x.Shrink)
}
@@ -14829,13 +13661,7 @@ func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -14999,25 +13825,14 @@ func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -15192,39 +14007,19 @@ func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- if x.Shard_ID != nil {
- out.Base64Bytes(x.Shard_ID)
- } else {
- out.String("")
- }
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Shard_ID)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"success\":"
+ const prefix string = ",\"success\":"
out.RawString(prefix)
out.Bool(x.Success)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"error\":"
+ const prefix string = ",\"error\":"
out.RawString(prefix)
out.String(x.Error)
}
@@ -15259,13 +14054,7 @@ func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer)
case "shardID":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Shard_ID = f
}
case "success":
@@ -15375,16 +14164,10 @@ func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"results\":"
- out.RawString(prefix)
+ const prefix string = ",\"results\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Results {
if i != 0 {
@@ -15569,25 +14352,14 @@ func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -15722,26 +14494,16 @@ func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
@@ -15779,13 +14541,7 @@ func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -15925,25 +14681,14 @@ func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -16217,25 +14962,14 @@ func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -16410,46 +15144,26 @@ func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Shard_ID {
if i != 0 {
out.RawByte(',')
}
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Shard_ID[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"targetFillPercent\":"
+ const prefix string = ",\"targetFillPercent\":"
out.RawString(prefix)
out.Uint32(x.TargetFillPercent)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"concurrencyLimit\":"
+ const prefix string = ",\"concurrencyLimit\":"
out.RawString(prefix)
out.Uint32(x.ConcurrencyLimit)
}
@@ -16487,13 +15201,7 @@ func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list [][]byte
in.Delim('[')
for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
list = append(list, f)
in.WantComma()
}
@@ -16503,29 +15211,13 @@ func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "targetFillPercent":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.TargetFillPercent = f
}
case "concurrencyLimit":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.ConcurrencyLimit = f
}
}
@@ -16661,25 +15353,14 @@ func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -16854,39 +15535,19 @@ func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Wri
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- if x.Shard_ID != nil {
- out.Base64Bytes(x.Shard_ID)
- } else {
- out.String("")
- }
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Shard_ID)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"success\":"
+ const prefix string = ",\"success\":"
out.RawString(prefix)
out.Bool(x.Success)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"error\":"
+ const prefix string = ",\"error\":"
out.RawString(prefix)
out.String(x.Error)
}
@@ -16921,13 +15582,7 @@ func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lex
case "shardID":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Shard_ID = f
}
case "success":
@@ -17037,16 +15692,10 @@ func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"results\":"
- out.RawString(prefix)
+ const prefix string = ",\"results\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Results {
if i != 0 {
@@ -17231,25 +15880,14 @@ func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -17303,727 +15941,3 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
in.Consumed()
}
}
-
-type ListShardsForObjectRequest_Body struct {
- ObjectId string `json:"objectId"`
- ContainerId string `json:"containerId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.StringSize(1, x.ObjectId)
- size += proto.StringSize(2, x.ContainerId)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.ObjectId) != 0 {
- mm.AppendString(1, x.ObjectId)
- }
- if len(x.ContainerId) != 0 {
- mm.AppendString(2, x.ContainerId)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ObjectId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ObjectId")
- }
- x.ObjectId = data
- case 2: // ContainerId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- }
- }
- return nil
-}
-func (x *ListShardsForObjectRequest_Body) GetObjectId() string {
- if x != nil {
- return x.ObjectId
- }
- return ""
-}
-func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) {
- x.ObjectId = v
-}
-func (x *ListShardsForObjectRequest_Body) GetContainerId() string {
- if x != nil {
- return x.ContainerId
- }
- return ""
-}
-func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) {
- x.ContainerId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"objectId\":"
- out.RawString(prefix)
- out.String(x.ObjectId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- out.String(x.ContainerId)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "objectId":
- {
- var f string
- f = in.String()
- x.ObjectId = f
- }
- case "containerId":
- {
- var f string
- f = in.String()
- x.ContainerId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectRequest struct {
- Body *ListShardsForObjectRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil)
- _ json.Marshaler = (*ListShardsForObjectRequest)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
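
The pair deleted above is the hook for request signing: SignedDataSize reports
the stable encoded size so a caller can pre-allocate, and ReadSignedData
materialises the body's stable protobuf bytes, which the caller then signs and
stores in Signature. A hedged sketch of such a caller; the sign parameter is a
placeholder supplied by the caller, not an API of this package:

package sketch

// signedMessage matches the SignedDataSize/ReadSignedData pair generated here.
type signedMessage interface {
	SignedDataSize() int
	ReadSignedData([]byte) ([]byte, error)
}

// signBody extracts the stable body encoding and hands it to an arbitrary
// signing function.
func signBody(m signedMessage, sign func([]byte) ([]byte, error)) ([]byte, error) {
	buf := make([]byte, 0, m.SignedDataSize()) // pre-size from the stable size
	data, err := m.ReadSignedData(buf)
	if err != nil {
		return nil, err
	}
	return sign(data)
}
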
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsForObjectRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) {
- x.Body = v
-}
-func (x *ListShardsForObjectRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsForObjectRequest_Body
- f = new(ListShardsForObjectRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectResponse_Body struct {
- Shard_ID [][]byte `json:"shardID"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.RepeatedBytesSize(1, x.Shard_ID)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- }
- }
- return nil
-}
-func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectResponse struct {
- Body *ListShardsForObjectResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
- _ json.Marshaler = (*ListShardsForObjectResponse)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsForObjectResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) {
- x.Body = v
-}
-func (x *ListShardsForObjectResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsForObjectResponse_Body
- f = new(ListShardsForObjectResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
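// Aside — how the removed json.(Un)Marshaler pair round-trips (a sketch that
// assumes the generated ListShardsForObjectResponse_Body above; shard IDs are
// base64-encoded on the wire):
//
//	body := new(ListShardsForObjectResponse_Body)
//	body.Shard_ID = [][]byte{[]byte("shard-0")}
//	data, err := body.MarshalJSON() // {"shardID":["c2hhcmQtMA=="]}
//	if err == nil {
//		restored := new(ListShardsForObjectResponse_Body)
//		err = restored.UnmarshalJSON(data) // Shard_ID decoded back from base64
//	}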
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index 045662ccf..f5cfefa85 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -26,6 +26,7 @@ const (
ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
+ ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation"
ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus"
ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus"
@@ -41,7 +42,6 @@ const (
ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
- ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject"
)
// ControlServiceClient is the client API for ControlService service.
@@ -62,6 +62,10 @@ type ControlServiceClient interface {
SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error)
+	// EvacuateShard moves all data from one shard to the others.
+	//
+	// Deprecated: Use StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation instead.
+ EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -96,8 +100,6 @@ type ControlServiceClient interface {
DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
- // ListShardsForObject returns shard info where object is stored.
- ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error)
}
type controlServiceClient struct {
@@ -171,6 +173,15 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron
return out, nil
}
+func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
+ out := new(EvacuateShardResponse)
+ err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) {
out := new(StartShardEvacuationResponse)
err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...)
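// Aside — replacement flow for the deprecated one-shot RPC (a sketch; the
// request/response type names for the status call are assumed from the
// method set above, and request bodies are left for the caller to fill):
func evacuate(ctx context.Context, cli ControlServiceClient,
	start *StartShardEvacuationRequest, status *GetShardEvacuationStatusRequest,
) (*GetShardEvacuationStatusResponse, error) {
	// Before: a single blocking EvacuateShard call.
	// After: start the evacuation asynchronously, then poll its status.
	if _, err := cli.StartShardEvacuation(ctx, start); err != nil {
		return nil, err
	}
	return cli.GetShardEvacuationStatus(ctx, status)
}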
@@ -306,15 +317,6 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS
return out, nil
}
-func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) {
- out := new(ListShardsForObjectResponse)
- err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
// ControlServiceServer is the server API for ControlService service.
// All implementations should embed UnimplementedControlServiceServer
// for forward compatibility
@@ -333,6 +335,10 @@ type ControlServiceServer interface {
SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error)
+	// EvacuateShard moves all data from one shard to the others.
+	//
+	// Deprecated: Use StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation instead.
+ EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
@@ -367,8 +373,6 @@ type ControlServiceServer interface {
DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
// StartShardRebuild starts shard rebuild process.
StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
- // ListShardsForObject returns shard info where object is stored.
- ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -396,6 +400,9 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard
func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
}
+func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented")
+}
func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented")
}
@@ -441,9 +448,6 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh
func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
}
-func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented")
-}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -582,6 +586,24 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EvacuateShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).EvacuateShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_EvacuateShard_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartShardEvacuationRequest)
if err := dec(in); err != nil {
@@ -852,24 +874,6 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
-func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListShardsForObjectRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).ListShardsForObject(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_ListShardsForObject_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -905,6 +909,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SynchronizeTree",
Handler: _ControlService_SynchronizeTree_Handler,
},
+ {
+ MethodName: "EvacuateShard",
+ Handler: _ControlService_EvacuateShard_Handler,
+ },
{
MethodName: "StartShardEvacuation",
Handler: _ControlService_StartShardEvacuation_Handler,
@@ -965,10 +973,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "StartShardRebuild",
Handler: _ControlService_StartShardRebuild_Handler,
},
- {
- MethodName: "ListShardsForObject",
- Handler: _ControlService_ListShardsForObject_Handler,
- },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 69d87292d..f92106589 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -5,9 +5,9 @@ package control
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -234,35 +234,16 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
- if x.Sign != nil {
- out.Base64Bytes(x.Sign)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Sign)
}
out.RawByte('}')
}
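// Aside — a minimal standalone sketch of the static-prefix pattern adopted
// above: every field prefix embeds a leading comma, and the first field
// slices it off with prefix[1:], replacing the old `first` bookkeeping
// (assumes key/sig need no JSON escaping; illustrative only):
func marshalKeySig(key, sig string) []byte {
	buf := []byte{'{'}
	const pKey = ",\"key\":"
	buf = append(buf, pKey[1:]...) // first field: slice off the leading comma
	buf = append(append(append(buf, '"'), key...), '"')
	const pSig = ",\"signature\":"
	buf = append(buf, pSig...) // every later field keeps its comma
	buf = append(append(append(buf, '"'), sig...), '"')
	return append(buf, '}') // e.g. {"key":"k","signature":"s"}
}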
@@ -295,25 +276,13 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Key = f
}
case "signature":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Sign = f
}
}
@@ -445,35 +414,19 @@ func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
out.String(x.Key)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"value\":"
+ const prefix string = ",\"value\":"
out.RawString(prefix)
out.String(x.Value)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parents\":"
+ const prefix string = ",\"parents\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Parents {
@@ -692,29 +645,14 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"publicKey\":"
- out.RawString(prefix)
- if x.PublicKey != nil {
- out.Base64Bytes(x.PublicKey)
- } else {
- out.String("")
- }
+ const prefix string = ",\"publicKey\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.PublicKey)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"addresses\":"
+ const prefix string = ",\"addresses\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Addresses {
@@ -726,12 +664,7 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"attributes\":"
+ const prefix string = ",\"attributes\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Attributes {
@@ -743,19 +676,9 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"state\":"
+ const prefix string = ",\"state\":"
out.RawString(prefix)
- v := int32(x.State)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ out.Int32(int32(x.State))
}
out.RawByte('}')
}
@@ -788,13 +711,7 @@ func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "publicKey":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.PublicKey = f
}
case "addresses":
@@ -961,27 +878,14 @@ func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"epoch\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
- out.RawByte('"')
+ const prefix string = ",\"epoch\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.Epoch)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodes\":"
+ const prefix string = ",\"nodes\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Nodes {
@@ -1023,15 +927,7 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "epoch":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.Epoch = f
}
case "nodes":
@@ -1283,39 +1179,19 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- if x.Shard_ID != nil {
- out.Base64Bytes(x.Shard_ID)
- } else {
- out.String("")
- }
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Shard_ID)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"metabasePath\":"
+ const prefix string = ",\"metabasePath\":"
out.RawString(prefix)
out.String(x.MetabasePath)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"blobstor\":"
+ const prefix string = ",\"blobstor\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Blobstor {
@@ -1327,57 +1203,27 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"writecachePath\":"
+ const prefix string = ",\"writecachePath\":"
out.RawString(prefix)
out.String(x.WritecachePath)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"mode\":"
+ const prefix string = ",\"mode\":"
out.RawString(prefix)
- v := int32(x.Mode)
- if vv, ok := ShardMode_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ out.Int32(int32(x.Mode))
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"errorCount\":"
+ const prefix string = ",\"errorCount\":"
out.RawString(prefix)
out.Uint32(x.ErrorCount)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"piloramaPath\":"
+ const prefix string = ",\"piloramaPath\":"
out.RawString(prefix)
out.String(x.PiloramaPath)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"evacuationInProgress\":"
+ const prefix string = ",\"evacuationInProgress\":"
out.RawString(prefix)
out.Bool(x.EvacuationInProgress)
}
@@ -1412,13 +1258,7 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "shardID":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Shard_ID = f
}
case "metabasePath":
@@ -1472,15 +1312,7 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "errorCount":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.ErrorCount = f
}
case "piloramaPath":
@@ -1604,25 +1436,14 @@ func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"path\":"
- out.RawString(prefix)
+ const prefix string = ",\"path\":"
+ out.RawString(prefix[1:])
out.String(x.Path)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"type\":"
+ const prefix string = ",\"type\":"
out.RawString(prefix)
out.String(x.Type)
}
@@ -1816,30 +1637,14 @@ func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"type\":"
- out.RawString(prefix)
- v := int32(x.Type)
- if vv, ok := ChainTarget_TargetType_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ const prefix string = ",\"type\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Type))
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"Name\":"
+ const prefix string = ",\"Name\":"
out.RawString(prefix)
out.String(x.Name)
}
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 1b92fdaad..496b07a98 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -5,11 +5,10 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
versionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
@@ -27,9 +26,9 @@ type executorSvc struct {
// NodeState encapsulates information
// about current node state.
type NodeState interface {
- // LocalNodeInfo must return current node state
+	// LocalNodeInfo must return current node state
// in FrostFS API v2 NodeInfo structure.
- LocalNodeInfo() *netmapSDK.NodeInfo
+ LocalNodeInfo() (*netmap.NodeInfo, error)
// ReadCurrentNetMap reads current local network map of the storage node
// into the given parameter. Returns any error encountered which prevented
@@ -40,19 +39,17 @@ type NodeState interface {
// NetworkInfo encapsulates source of the
// recent information about the FrostFS network.
type NetworkInfo interface {
- // Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
+	// Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
- Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+ Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
}
func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
- // this should never happen, otherwise it's a programmer's bug
- msg := "BUG: can't create netmap execution service"
- assert.False(s == nil, msg, "node state is nil")
- assert.False(netInfo == nil, msg, "network info is nil")
- assert.False(respSvc == nil, msg, "response service is nil")
- assert.True(version.IsValid(v), msg, "invalid version")
+ if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil {
+		// this should never happen, otherwise it's a programmer's bug
+ panic("can't create netmap execution service")
+ }
res := &executorSvc{
state: s,
@@ -67,15 +64,39 @@ func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo,
func (s *executorSvc) LocalNodeInfo(
_ context.Context,
- _ *netmap.LocalNodeInfoRequest,
+ req *netmap.LocalNodeInfoRequest,
) (*netmap.LocalNodeInfoResponse, error) {
- ni := s.state.LocalNodeInfo()
- var nodeInfo netmap.NodeInfo
- ni.WriteToV2(&nodeInfo)
+ verV2 := req.GetMetaHeader().GetVersion()
+ if verV2 == nil {
+ return nil, errors.New("missing version")
+ }
+
+ var ver versionsdk.Version
+ if err := ver.ReadFromV2(*verV2); err != nil {
+ return nil, fmt.Errorf("can't read version: %w", err)
+ }
+
+ ni, err := s.state.LocalNodeInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ if addrNum := ni.NumberOfAddresses(); addrNum > 0 && ver.Minor() <= 7 {
+ ni2 := new(netmap.NodeInfo)
+ ni2.SetPublicKey(ni.GetPublicKey())
+ ni2.SetState(ni.GetState())
+ ni2.SetAttributes(ni.GetAttributes())
+ ni.IterateAddresses(func(s string) bool {
+ ni2.SetAddresses(s)
+ return true
+ })
+
+ ni = ni2
+ }
body := new(netmap.LocalNodeInfoResponseBody)
body.SetVersion(&s.version)
- body.SetNodeInfo(&nodeInfo)
+ body.SetNodeInfo(ni)
resp := new(netmap.LocalNodeInfoResponse)
resp.SetBody(body)
@@ -85,7 +106,7 @@ func (s *executorSvc) LocalNodeInfo(
}
func (s *executorSvc) NetworkInfo(
- ctx context.Context,
+ _ context.Context,
req *netmap.NetworkInfoRequest,
) (*netmap.NetworkInfoResponse, error) {
verV2 := req.GetMetaHeader().GetVersion()
@@ -98,7 +119,7 @@ func (s *executorSvc) NetworkInfo(
return nil, fmt.Errorf("can't read version: %w", err)
}
- ni, err := s.netInfo.Dump(ctx, ver)
+ ni, err := s.netInfo.Dump(ver)
if err != nil {
return nil, err
}
diff --git a/pkg/services/netmap/server.go b/pkg/services/netmap/server.go
index eff880dbe..0a09c9f44 100644
--- a/pkg/services/netmap/server.go
+++ b/pkg/services/netmap/server.go
@@ -3,7 +3,7 @@ package netmap
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
)
// Server is an interface of the FrostFS API Netmap service server.
diff --git a/pkg/services/netmap/sign.go b/pkg/services/netmap/sign.go
index 5f184d5c0..9a16ad8f1 100644
--- a/pkg/services/netmap/sign.go
+++ b/pkg/services/netmap/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
)
type signService struct {
diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go
new file mode 100644
index 000000000..921545c8b
--- /dev/null
+++ b/pkg/services/object/acl/acl.go
@@ -0,0 +1,262 @@
+package acl
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+ "io"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
+ bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// Checker implements the v2.ACLChecker interface and provides
+// ACL/eACL validation functionality.
+type Checker struct {
+ eaclSrc container.EACLSource
+ validator *eaclSDK.Validator
+ localStorage *engine.StorageEngine
+ state netmap.State
+}
+
+type localStorage struct {
+ ls *engine.StorageEngine
+}
+
+func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ if s.ls == nil {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ return engine.Head(ctx, s.ls, addr)
+}
+
+// Various EACL check errors.
+var (
+ errEACLDeniedByRule = errors.New("denied by rule")
+ errBearerExpired = errors.New("bearer token has expired")
+ errBearerInvalidSignature = errors.New("bearer token has invalid signature")
+ errBearerInvalidContainerID = errors.New("bearer token was created for another container")
+ errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
+ errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
+)
+
+// NewChecker creates Checker.
+// Panics if at least one of the parameters is nil.
+func NewChecker(
+ state netmap.State,
+ eaclSrc container.EACLSource,
+ validator *eaclSDK.Validator,
+ localStorage *engine.StorageEngine,
+) *Checker {
+ return &Checker{
+ eaclSrc: eaclSrc,
+ validator: validator,
+ localStorage: localStorage,
+ state: state,
+ }
+}
+
+// CheckBasicACL is the main check function for basic ACL.
+func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool {
+ // check basic ACL permissions
+ return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole())
+}
+
+// StickyBitCheck validates owner field in the request if sticky bit is enabled.
+func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool {
+	// According to the FrostFS specification, the sticky bit has no effect on system nodes
+ // for correct intra-container work with objects (in particular, replication).
+ if info.RequestRole() == acl.RoleContainer {
+ return true
+ }
+
+ if !info.BasicACL().Sticky() {
+ return true
+ }
+
+ if len(info.SenderKey()) == 0 {
+ return false
+ }
+
+ requestSenderKey := unmarshalPublicKey(info.SenderKey())
+
+ return isOwnerFromKey(owner, requestSenderKey)
+}
+
+// CheckEACL is the main check function for extended ACL.
+func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
+ basicACL := reqInfo.BasicACL()
+ if !basicACL.Extendable() {
+ return nil
+ }
+
+ bearerTok := reqInfo.Bearer()
+ impersonate := bearerTok != nil && bearerTok.Impersonate()
+
+ // if bearer token is not allowed, then ignore it
+ if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) {
+ reqInfo.CleanBearer()
+ }
+
+ var table eaclSDK.Table
+ cnr := reqInfo.ContainerID()
+
+ if bearerTok == nil {
+ eaclInfo, err := c.eaclSrc.GetEACL(cnr)
+ if err != nil {
+ if client.IsErrEACLNotFound(err) {
+ return nil
+ }
+ return err
+ }
+
+ table = *eaclInfo.Value
+ } else {
+ table = bearerTok.EACLTable()
+ }
+
+	// if bearer token is not present, isValidBearer returns no error
+ if err := isValidBearer(reqInfo, c.state); err != nil {
+ return err
+ }
+
+ hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo)
+ if err != nil {
+ return err
+ }
+
+ eaclRole := getRole(reqInfo)
+
+ action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
+ WithRole(eaclRole).
+ WithOperation(eaclSDK.Operation(reqInfo.Operation())).
+ WithContainerID(&cnr).
+ WithSenderKey(reqInfo.SenderKey()).
+ WithHeaderSource(hdrSrc).
+ WithEACLTable(&table),
+ )
+
+ if action != eaclSDK.ActionAllow {
+ return errEACLDeniedByRule
+ }
+ return nil
+}
+
+func getRole(reqInfo v2.RequestInfo) eaclSDK.Role {
+ var eaclRole eaclSDK.Role
+ switch op := reqInfo.RequestRole(); op {
+ default:
+ eaclRole = eaclSDK.Role(op)
+ case acl.RoleOwner:
+ eaclRole = eaclSDK.RoleUser
+ case acl.RoleInnerRing, acl.RoleContainer:
+ eaclRole = eaclSDK.RoleSystem
+ case acl.RoleOthers:
+ eaclRole = eaclSDK.RoleOthers
+ }
+ return eaclRole
+}
+
+func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) {
+ var xHeaderSource eaclV2.XHeaderSource
+ if req, ok := msg.(eaclV2.Request); ok {
+ xHeaderSource = eaclV2.NewRequestXHeaderSource(req)
+ } else {
+ xHeaderSource = eaclV2.NewResponseXHeaderSource(msg.(eaclV2.Response), reqInfo.Request().(eaclV2.Request))
+ }
+
+ hdrSrc, err := eaclV2.NewMessageHeaderSource(&localStorage{ls: c.localStorage}, xHeaderSource, cnr, eaclV2.WithOID(reqInfo.ObjectID()))
+ if err != nil {
+ return nil, fmt.Errorf("can't parse headers: %w", err)
+ }
+ return hdrSrc, nil
+}
+
+// isValidBearer checks whether the bearer token was correctly signed by an
+// authorized entity. This check lives on the whole ACL service because it
+// requires fetching the current epoch to verify the token lifetime.
+func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error {
+ ownerCnr := reqInfo.ContainerOwner()
+
+ token := reqInfo.Bearer()
+
+ // 0. Check if bearer token is present in reqInfo.
+ if token == nil {
+ return nil
+ }
+
+ // 1. First check token lifetime. Simplest verification.
+ if token.InvalidAt(st.CurrentEpoch()) {
+ return errBearerExpired
+ }
+
+ // 2. Then check if bearer token is signed correctly.
+ if !token.VerifySignature() {
+ return errBearerInvalidSignature
+ }
+
+ // 3. Then check if container is either empty or equal to the container in the request.
+ cnr, isSet := token.EACLTable().CID()
+ if isSet && !cnr.Equals(reqInfo.ContainerID()) {
+ return errBearerInvalidContainerID
+ }
+
+ // 4. Then check if container owner signed this token.
+ if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) {
+ // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
+ return errBearerNotSignedByOwner
+ }
+
+ // 5. Then check if request sender has rights to use this token.
+ var keySender frostfsecdsa.PublicKey
+
+ err := keySender.Decode(reqInfo.SenderKey())
+ if err != nil {
+ return fmt.Errorf("decode sender public key: %w", err)
+ }
+
+ var usrSender user.ID
+ user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender))
+
+ if !token.AssertUser(usrSender) {
+ // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
+ return errBearerInvalidOwner
+ }
+
+ return nil
+}
+
+func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
+ if key == nil {
+ return false
+ }
+
+ var id2 user.ID
+ user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
+
+ return id.Equals(id2)
+}
+
+func unmarshalPublicKey(bs []byte) *keys.PublicKey {
+ pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256())
+ if err != nil {
+ return nil
+ }
+ return pub
+}
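// Aside — intended call order for the Checker above (a sketch; `authorize`
// and its error messages are illustrative, not part of the service API):
func authorize(c *Checker, msg any, info v2.RequestInfo, owner user.ID) error {
	if !c.CheckBasicACL(info) {
		return errors.New("denied by basic ACL") // placeholder error
	}
	if !c.StickyBitCheck(info, owner) {
		return errors.New("denied by sticky bit check") // placeholder error
	}
	return c.CheckEACL(msg, info) // nil means the request is allowed
}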
diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go
new file mode 100644
index 000000000..d63cb1285
--- /dev/null
+++ b/pkg/services/object/acl/acl_test.go
@@ -0,0 +1,89 @@
+package acl
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "github.com/stretchr/testify/require"
+)
+
+type emptyEACLSource struct{}
+
+func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) {
+ return nil, nil
+}
+
+type emptyNetmapState struct{}
+
+func (e emptyNetmapState) CurrentEpoch() uint64 {
+ return 0
+}
+
+func TestStickyCheck(t *testing.T) {
+ checker := NewChecker(
+ emptyNetmapState{},
+ emptyEACLSource{},
+ eaclSDK.NewValidator(),
+ &engine.StorageEngine{})
+
+ t.Run("system role", func(t *testing.T) {
+ var info v2.RequestInfo
+
+ info.SetSenderKey(make([]byte, 33)) // any non-empty key
+ info.SetRequestRole(acl.RoleContainer)
+
+ require.True(t, checker.StickyBitCheck(info, usertest.ID()))
+
+ var basicACL acl.Basic
+ basicACL.MakeSticky()
+
+ info.SetBasicACL(basicACL)
+
+ require.True(t, checker.StickyBitCheck(info, usertest.ID()))
+ })
+
+ t.Run("owner ID and/or public key emptiness", func(t *testing.T) {
+ var info v2.RequestInfo
+
+ info.SetRequestRole(acl.RoleOthers) // should be non-system role
+
+ assertFn := func(isSticky, withKey, withOwner, expected bool) {
+ info := info
+ if isSticky {
+ var basicACL acl.Basic
+ basicACL.MakeSticky()
+
+ info.SetBasicACL(basicACL)
+ }
+
+ if withKey {
+ info.SetSenderKey(make([]byte, 33))
+ } else {
+ info.SetSenderKey(nil)
+ }
+
+ var ownerID user.ID
+
+ if withOwner {
+ ownerID = usertest.ID()
+ }
+
+ require.Equal(t, expected, checker.StickyBitCheck(info, ownerID))
+ }
+
+ assertFn(true, false, false, false)
+ assertFn(true, true, false, false)
+ assertFn(true, false, true, false)
+ assertFn(false, false, false, true)
+ assertFn(false, true, false, true)
+ assertFn(false, false, true, true)
+ assertFn(false, true, true, true)
+ })
+}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
new file mode 100644
index 000000000..023b99239
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/eacl_test.go
@@ -0,0 +1,166 @@
+package v2
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "testing"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+type testLocalStorage struct {
+ t *testing.T
+
+ expAddr oid.Address
+
+ obj *objectSDK.Object
+
+ err error
+}
+
+func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
+ require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
+
+ return s.obj, s.err
+}
+
+func testXHeaders(strs ...string) []session.XHeader {
+ res := make([]session.XHeader, len(strs)/2)
+
+ for i := 0; i < len(strs); i += 2 {
+ res[i/2].SetKey(strs[i])
+ res[i/2].SetValue(strs[i+1])
+ }
+
+ return res
+}
+
+func TestHeadRequest(t *testing.T) {
+ req := new(objectV2.HeadRequest)
+
+ meta := new(session.RequestMetaHeader)
+ req.SetMetaHeader(meta)
+
+ body := new(objectV2.HeadRequestBody)
+ req.SetBody(body)
+
+ addr := oidtest.Address()
+
+ var addrV2 refs.Address
+ addr.WriteToV2(&addrV2)
+
+ body.SetAddress(&addrV2)
+
+ xKey := "x-key"
+ xVal := "x-val"
+ xHdrs := testXHeaders(
+ xKey, xVal,
+ )
+
+ meta.SetXHeaders(xHdrs)
+
+ obj := objectSDK.New()
+
+ attrKey := "attr_key"
+ attrVal := "attr_val"
+ var attr objectSDK.Attribute
+ attr.SetKey(attrKey)
+ attr.SetValue(attrVal)
+ obj.SetAttributes(attr)
+
+ table := new(eaclSDK.Table)
+
+ priv, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ senderKey := priv.PublicKey()
+
+ r := eaclSDK.NewRecord()
+ r.SetOperation(eaclSDK.OperationHead)
+ r.SetAction(eaclSDK.ActionDeny)
+ r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
+ r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
+ eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
+
+ table.AddRecord(r)
+
+ lStorage := &testLocalStorage{
+ t: t,
+ expAddr: addr,
+ obj: obj,
+ }
+
+ id := addr.Object()
+
+ newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
+ hdrSrc, err := NewMessageHeaderSource(
+ lStorage,
+ NewRequestXHeaderSource(req),
+ addr.Container(),
+ WithOID(&id))
+ require.NoError(t, err)
+ return hdrSrc
+ }
+
+ cnr := addr.Container()
+
+ unit := new(eaclSDK.ValidationUnit).
+ WithContainerID(&cnr).
+ WithOperation(eaclSDK.OperationHead).
+ WithSenderKey(senderKey.Bytes()).
+ WithEACLTable(table)
+
+ validator := eaclSDK.NewValidator()
+
+ checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
+
+ meta.SetXHeaders(nil)
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ meta.SetXHeaders(xHdrs)
+
+ obj.SetAttributes()
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ lStorage.err = errors.New("any error")
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ r.SetAction(eaclSDK.ActionAllow)
+
+ rID := eaclSDK.NewRecord()
+ rID.SetOperation(eaclSDK.OperationHead)
+ rID.SetAction(eaclSDK.ActionDeny)
+ rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
+ eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
+
+ table = eaclSDK.NewTable()
+ table.AddRecord(r)
+ table.AddRecord(rID)
+
+ unit.WithEACLTable(table)
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+}
+
+func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
+ actual, fromRule := v.CalculateAction(u)
+ require.True(t, fromRule)
+ require.Equal(t, expected, actual)
+}
+
+func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
+ actual, fromRule := v.CalculateAction(u)
+ require.False(t, fromRule)
+ require.Equal(t, eaclSDK.ActionAllow, actual)
+}
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
new file mode 100644
index 000000000..34975e1e6
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/headers.go
@@ -0,0 +1,246 @@
+package v2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+type Option func(*cfg)
+
+type cfg struct {
+ storage ObjectStorage
+
+ msg XHeaderSource
+
+ cnr cid.ID
+ obj *oid.ID
+}
+
+type ObjectStorage interface {
+ Head(context.Context, oid.Address) (*objectSDK.Object, error)
+}
+
+type Request interface {
+ GetMetaHeader() *session.RequestMetaHeader
+}
+
+type Response interface {
+ GetMetaHeader() *session.ResponseMetaHeader
+}
+
+type headerSource struct {
+ requestHeaders []eaclSDK.Header
+ objectHeaders []eaclSDK.Header
+
+ incompleteObjectHeaders bool
+}
+
+func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) {
+ cfg := &cfg{
+ storage: os,
+ cnr: cnrID,
+ msg: xhs,
+ }
+
+ for i := range opts {
+ opts[i](cfg)
+ }
+
+ if cfg.msg == nil {
+ return nil, errors.New("message is not provided")
+ }
+
+ var res headerSource
+
+ err := cfg.readObjectHeaders(&res)
+ if err != nil {
+ return nil, err
+ }
+
+ res.requestHeaders = cfg.msg.GetXHeaders()
+
+ return res, nil
+}
+
+func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
+ switch typ {
+ default:
+ return nil, true
+ case eaclSDK.HeaderFromRequest:
+ return h.requestHeaders, true
+ case eaclSDK.HeaderFromObject:
+ return h.objectHeaders, !h.incompleteObjectHeaders
+ }
+}
+
+type xHeader session.XHeader
+
+func (x xHeader) Key() string {
+ return (*session.XHeader)(&x).GetKey()
+}
+
+func (x xHeader) Value() string {
+ return (*session.XHeader)(&x).GetValue()
+}
+
+var errMissingOID = errors.New("object ID is missing")
+
+func (h *cfg) readObjectHeaders(dst *headerSource) error {
+ switch m := h.msg.(type) {
+ default:
+ panic(fmt.Sprintf("unexpected message type %T", h.msg))
+ case requestXHeaderSource:
+ return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
+ case responseXHeaderSource:
+ return h.readObjectHeadersResponseXHeaderSource(m, dst)
+ }
+}
+
+func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
+ switch req := m.req.(type) {
+ case
+ *objectV2.GetRequest,
+ *objectV2.HeadRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
+
+ objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objHeaders
+ dst.incompleteObjectHeaders = !completed
+ case
+ *objectV2.GetRangeRequest,
+ *objectV2.GetRangeHashRequest,
+ *objectV2.DeleteRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
+
+ dst.objectHeaders = addressHeaders(h.cnr, h.obj)
+ case *objectV2.PutRequest:
+ if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
+ oV2 := new(objectV2.Object)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ case *objectV2.PutSingleRequest:
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj)
+ case *objectV2.SearchRequest:
+ cnrV2 := req.GetBody().GetContainerID()
+ var cnr cid.ID
+
+ if cnrV2 != nil {
+ if err := cnr.ReadFromV2(*cnrV2); err != nil {
+ return fmt.Errorf("can't parse container ID: %w", err)
+ }
+ }
+
+ dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
+ }
+ return nil
+}
+
+func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
+ switch resp := m.resp.(type) {
+ default:
+ objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objectHeaders
+ dst.incompleteObjectHeaders = !completed
+ case *objectV2.GetResponse:
+ if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ oV2 := new(objectV2.Object)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ case *objectV2.HeadResponse:
+ oV2 := new(objectV2.Object)
+
+ var hdr *objectV2.Header
+
+ switch v := resp.GetBody().GetHeaderPart().(type) {
+ case *objectV2.ShortHeader:
+ hdr = new(objectV2.Header)
+
+ var idV2 refsV2.ContainerID
+ h.cnr.WriteToV2(&idV2)
+
+ hdr.SetContainerID(&idV2)
+ hdr.SetVersion(v.GetVersion())
+ hdr.SetCreationEpoch(v.GetCreationEpoch())
+ hdr.SetOwnerID(v.GetOwnerID())
+ hdr.SetObjectType(v.GetObjectType())
+ hdr.SetPayloadLength(v.GetPayloadLength())
+ case *objectV2.HeaderWithSignature:
+ hdr = v.GetHeader()
+ }
+
+ oV2.SetHeader(hdr)
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ return nil
+}
+
+func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
+ if idObj != nil {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(*idObj)
+
+ obj, err := h.storage.Head(context.TODO(), addr)
+ if err == nil {
+ return headersFromObject(obj, cnr, idObj), true
+ }
+ }
+
+ return addressHeaders(cnr, idObj), false
+}
+
+func cidHeader(idCnr cid.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectContainerID,
+ v: idCnr.EncodeToString(),
+ }
+}
+
+func oidHeader(obj oid.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectID,
+ v: obj.EncodeToString(),
+ }
+}
+
+func ownerIDHeader(ownerID user.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectOwnerID,
+ v: ownerID.EncodeToString(),
+ }
+}
+
+func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
+ hh := make([]eaclSDK.Header, 0, 2)
+ hh = append(hh, cidHeader(cnr))
+
+ if oid != nil {
+ hh = append(hh, oidHeader(*oid))
+ }
+
+ return hh
+}
diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go
new file mode 100644
index 000000000..72bd4c2d2
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/object.go
@@ -0,0 +1,92 @@
+package v2
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type sysObjHdr struct {
+ k, v string
+}
+
+func (s sysObjHdr) Key() string {
+ return s.k
+}
+
+func (s sysObjHdr) Value() string {
+ return s.v
+}
+
+func u64Value(v uint64) string {
+ return strconv.FormatUint(v, 10)
+}
+
+func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
+ var count int
+ for obj := obj; obj != nil; obj = obj.Parent() {
+ count += 9 + len(obj.Attributes())
+ }
+
+ res := make([]eaclSDK.Header, 0, count)
+ for ; obj != nil; obj = obj.Parent() {
+ res = append(res,
+ cidHeader(cnr),
+ // creation epoch
+ sysObjHdr{
+ k: acl.FilterObjectCreationEpoch,
+ v: u64Value(obj.CreationEpoch()),
+ },
+ // payload size
+ sysObjHdr{
+ k: acl.FilterObjectPayloadLength,
+ v: u64Value(obj.PayloadSize()),
+ },
+ // object version
+ sysObjHdr{
+ k: acl.FilterObjectVersion,
+ v: obj.Version().String(),
+ },
+ // object type
+ sysObjHdr{
+ k: acl.FilterObjectType,
+ v: obj.Type().String(),
+ },
+ )
+
+ if oid != nil {
+ res = append(res, oidHeader(*oid))
+ }
+
+ if idOwner := obj.OwnerID(); !idOwner.IsEmpty() {
+ res = append(res, ownerIDHeader(idOwner))
+ }
+
+ cs, ok := obj.PayloadChecksum()
+ if ok {
+ res = append(res, sysObjHdr{
+ k: acl.FilterObjectPayloadHash,
+ v: cs.String(),
+ })
+ }
+
+ cs, ok = obj.PayloadHomomorphicHash()
+ if ok {
+ res = append(res, sysObjHdr{
+ k: acl.FilterObjectHomomorphicHash,
+ v: cs.String(),
+ })
+ }
+
+ attrs := obj.Attributes()
+ for i := range attrs {
+		res = append(res, &attrs[i]) // only *Attribute implements the eaclSDK.Header interface
+ }
+ }
+
+ return res
+}
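// Aside — what headersFromObject emits per object in the parent chain
// (a sketch; the keys come from the acl.FilterObject* constants):
//
//	hdrs := headersFromObject(obj, cnr, &id)
//	for _, h := range hdrs {
//		// container ID, creation epoch, payload length, version, type,
//		// then optional object ID, owner ID, checksums, and one header
//		// per attribute — hence the 9 + len(attributes) capacity estimate.
//		fmt.Println(h.Key(), "=", h.Value())
//	}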
diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go
new file mode 100644
index 000000000..d91a21c75
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/opts.go
@@ -0,0 +1,11 @@
+package v2
+
+import (
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func WithOID(v *oid.ID) Option {
+ return func(c *cfg) {
+ c.obj = v
+ }
+}
diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go
new file mode 100644
index 000000000..c1fdea9d8
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/xheader.go
@@ -0,0 +1,69 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+)
+
+type XHeaderSource interface {
+ GetXHeaders() []eaclSDK.Header
+}
+
+type requestXHeaderSource struct {
+ req Request
+}
+
+func NewRequestXHeaderSource(req Request) XHeaderSource {
+ return requestXHeaderSource{req: req}
+}
+
+type responseXHeaderSource struct {
+ resp Response
+
+ req Request
+}
+
+func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource {
+ return responseXHeaderSource{resp: resp, req: req}
+}
+
+func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
+ ln := 0
+
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ ln += len(meta.GetXHeaders())
+ }
+
+ res := make([]eaclSDK.Header, 0, ln)
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ x := meta.GetXHeaders()
+ for i := range x {
+ res = append(res, (xHeader)(x[i]))
+ }
+ }
+
+ return res
+}
+
+func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
+ ln := 0
+ xHdrs := make([][]session.XHeader, 0)
+
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ x := meta.GetXHeaders()
+
+ ln += len(x)
+
+ xHdrs = append(xHdrs, x)
+ }
+
+ res := make([]eaclSDK.Header, 0, ln)
+
+ for i := range xHdrs {
+ for j := range xHdrs[i] {
+ res = append(res, xHeader(xHdrs[i][j]))
+ }
+ }
+
+ return res
+}
diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go
new file mode 100644
index 000000000..11b9e6e5f
--- /dev/null
+++ b/pkg/services/object/acl/v2/errors.go
@@ -0,0 +1,41 @@
+package v2
+
+import (
+ "fmt"
+
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+const invalidRequestMessage = "malformed request"
+
+func malformedRequestError(reason string) error {
+ return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
+}
+
+var (
+ errEmptyBody = malformedRequestError("empty body")
+ errEmptyVerificationHeader = malformedRequestError("empty verification header")
+	errEmptyBodySig = malformedRequestError("empty body signature")
+ errInvalidSessionSig = malformedRequestError("invalid session token signature")
+ errInvalidSessionOwner = malformedRequestError("invalid session token owner")
+ errInvalidVerb = malformedRequestError("session token verb is invalid")
+)
+
+const (
+ accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check"
+ accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v"
+)
+
+func basicACLErr(info RequestInfo) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation))
+
+ return errAccessDenied
+}
+
+func eACLErr(info RequestInfo, err error) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err))
+
+ return errAccessDenied
+}
diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go
new file mode 100644
index 000000000..2d2b7bc8d
--- /dev/null
+++ b/pkg/services/object/acl/v2/errors_test.go
@@ -0,0 +1,30 @@
+package v2
+
+import (
+ "errors"
+ "testing"
+
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBasicACLErr(t *testing.T) {
+ var reqInfo RequestInfo
+ err := basicACLErr(reqInfo)
+
+ var errAccessDenied *apistatus.ObjectAccessDenied
+
+ require.ErrorAs(t, err, &errAccessDenied,
+		"basicACLErr must be convertible to apistatus.ObjectAccessDenied")
+}
+
+func TestEACLErr(t *testing.T) {
+ var reqInfo RequestInfo
+ testErr := errors.New("test-eacl")
+ err := eACLErr(reqInfo, testErr)
+
+ var errAccessDenied *apistatus.ObjectAccessDenied
+
+		"eACLErr must be convertible to apistatus.ObjectAccessDenied")
+ "eACLErr must be able to be casted to apistatus.ObjectAccessDenied")
+}
diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go
new file mode 100644
index 000000000..15fcce884
--- /dev/null
+++ b/pkg/services/object/acl/v2/opts.go
@@ -0,0 +1,12 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+)
+
+// WithLogger returns option to set logger.
+func WithLogger(v *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = v
+ }
+}
diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go
new file mode 100644
index 000000000..74279e453
--- /dev/null
+++ b/pkg/services/object/acl/v2/request.go
@@ -0,0 +1,159 @@
+package v2
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// RequestInfo groups parsed, version-independent request information
+// (from the SDK library) together with the raw API request.
+type RequestInfo struct {
+ basicACL acl.Basic
+ requestRole acl.Role
+ operation acl.Op // put, get, head, etc.
+ cnrOwner user.ID // container owner
+
+ // cnrNamespace defines the namespace to which the container belongs.
+ cnrNamespace string
+
+ idCnr cid.ID
+
+ // optional for some requests,
+ // e.g. Put, Search
+ obj *oid.ID
+
+ senderKey []byte
+
+ bearer *bearer.Token // bearer token of request
+
+ srcRequest any
+}
+
+func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
+ r.basicACL = basicACL
+}
+
+func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
+ r.requestRole = requestRole
+}
+
+func (r *RequestInfo) SetSenderKey(senderKey []byte) {
+ r.senderKey = senderKey
+}
+
+// Request returns the raw API request.
+func (r RequestInfo) Request() any {
+ return r.srcRequest
+}
+
+// ContainerOwner returns the owner of the container.
+func (r RequestInfo) ContainerOwner() user.ID {
+ return r.cnrOwner
+}
+
+// ContainerNamespace returns the namespace the container belongs to.
+func (r RequestInfo) ContainerNamespace() string {
+ return r.cnrNamespace
+}
+
+// ObjectID returns the object ID.
+func (r RequestInfo) ObjectID() *oid.ID {
+ return r.obj
+}
+
+// ContainerID returns the container ID.
+func (r RequestInfo) ContainerID() cid.ID {
+ return r.idCnr
+}
+
+// CleanBearer removes the bearer token information from the request info.
+func (r *RequestInfo) CleanBearer() {
+ r.bearer = nil
+}
+
+// Bearer returns the bearer token of the request.
+func (r RequestInfo) Bearer() *bearer.Token {
+ return r.bearer
+}
+
+// BasicACL returns the basic ACL of the container.
+func (r RequestInfo) BasicACL() acl.Basic {
+ return r.basicACL
+}
+
+// SenderKey returns the public key of the request's sender.
+func (r RequestInfo) SenderKey() []byte {
+ return r.senderKey
+}
+
+// Operation returns the request's operation.
+func (r RequestInfo) Operation() acl.Op {
+ return r.operation
+}
+
+// RequestRole returns the request sender's role.
+func (r RequestInfo) RequestRole() acl.Role {
+ return r.requestRole
+}
+
+// IsSoftAPECheck reports whether APE should perform a soft check.
+// A soft APE check allows a request when CheckAPE returns NoRuleFound
+// for it; a hard check denies such a request.
+func (r RequestInfo) IsSoftAPECheck() bool {
+ return r.BasicACL().Bits() != 0
+}
+
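+// For instance, a container stored with a zero basic ACL is treated as
+// APE-only, making the check hard (a hedged sketch, assuming `info` is a
+// populated RequestInfo):
+//
+//	if !info.IsSoftAPECheck() {
+//		// NoRuleFound from CheckAPE denies the request
+//	}
+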
+// MetaWithToken groups session and bearer tokens,
+// the verification header, and the raw API request.
+type MetaWithToken struct {
+ vheader *sessionV2.RequestVerificationHeader
+ token *sessionSDK.Object
+ bearer *bearer.Token
+ src any
+}
+
+// RequestOwner returns the owner ID and its public key
+// derived from the internal meta information.
+func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+ if r.vheader == nil {
+ return nil, nil, errEmptyVerificationHeader
+ }
+
+ if r.bearer != nil && r.bearer.Impersonate() {
+ return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes())
+ }
+
+ // if a session token is present, use it as the source of truth
+ if r.token != nil {
+ // verify the signature of the session token
+ return ownerFromToken(r.token)
+ }
+
+ // otherwise, fall back to the original body signature
+ bodySignature := originalBodySignature(r.vheader)
+ if bodySignature == nil {
+ return nil, nil, errEmptyBodySig
+ }
+
+ return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
+ key, err := unmarshalPublicKey(rawKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var idSender user.ID
+ user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
+
+ return &idSender, key, nil
+}
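+
+// RequestOwner resolves the owner in this order: impersonation bearer token,
+// then session token, then the original body signature. A hedged usage
+// sketch (assuming `meta` is a populated MetaWithToken):
+//
+//	ownerID, ownerKey, err := meta.RequestOwner()
+//	if err != nil {
+//		return err // malformed request
+//	}
+//	_, _ = ownerID, ownerKey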
diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/acl/v2/request_test.go
similarity index 79%
rename from pkg/services/object/ape/metadata_test.go
rename to pkg/services/object/acl/v2/request_test.go
index fd919008f..980d1a2e5 100644
--- a/pkg/services/object/ape/metadata_test.go
+++ b/pkg/services/object/acl/v2/request_test.go
@@ -1,11 +1,11 @@
-package ape
+package v2
import (
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/signature"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) {
vh.SetBodySignature(&userSignature)
t.Run("empty verification header", func(t *testing.T) {
- req := Metadata{}
+ req := MetaWithToken{}
checkOwner(t, req, nil, errEmptyVerificationHeader)
})
t.Run("empty verification header signature", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: new(sessionV2.RequestVerificationHeader),
+ req := MetaWithToken{
+ vheader: new(sessionV2.RequestVerificationHeader),
}
checkOwner(t, req, nil, errEmptyBodySig)
})
t.Run("no tokens", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
+ req := MetaWithToken{
+ vheader: vh,
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer without impersonate, no session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, false),
+ req := MetaWithToken{
+ vheader: vh,
+ bearer: newBearer(t, containerOwner, userID, false),
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer with impersonate, no session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, true),
+ req := MetaWithToken{
+ vheader: vh,
+ bearer: newBearer(t, containerOwner, userID, true),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, true),
- SessionToken: newSession(t, pk),
+ req := MetaWithToken{
+ vheader: vh,
+ bearer: newBearer(t, containerOwner, userID, true),
+ token: newSession(t, pk),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
t.Run("with session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- SessionToken: newSession(t, containerOwner),
+ req := MetaWithToken{
+ vheader: vh,
+ token: newSession(t, containerOwner),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) {
var tok sessionSDK.Object
require.NoError(t, tok.ReadFromV2(tokV2))
- req := Metadata{
- VerificationHeader: vh,
- SessionToken: &tok,
+ req := MetaWithToken{
+ vheader: vh,
+ token: &tok,
}
checkOwner(t, req, nil, errInvalidSessionOwner)
})
@@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool
return &tok
}
-func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
+func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) {
_, actual, err := req.RequestOwner()
if expectedErr != nil {
require.ErrorIs(t, err, expectedErr)
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
new file mode 100644
index 000000000..5a8e8b065
--- /dev/null
+++ b/pkg/services/object/acl/v2/service.go
@@ -0,0 +1,919 @@
+package v2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "go.uber.org/zap"
+)
+
+// Service checks basic ACL rules.
+type Service struct {
+ *cfg
+
+ c objectCore.SenderClassifier
+}
+
+type putStreamBasicChecker struct {
+ source *Service
+ next object.PutObjectStream
+}
+
+type patchStreamBasicChecker struct {
+ source *Service
+ next object.PatchObjectStream
+ nonFirstSend bool
+}
+
+type getStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.GetObjectStream
+
+ info RequestInfo
+}
+
+type rangeStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.GetObjectRangeStream
+
+ info RequestInfo
+}
+
+type searchStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.SearchStream
+
+ info RequestInfo
+}
+
+// Option represents Service constructor option.
+type Option func(*cfg)
+
+type cfg struct {
+ log *logger.Logger
+
+ containers container.Source
+
+ checker ACLChecker
+
+ irFetcher InnerRingFetcher
+
+ nm netmap.Source
+
+ next object.ServiceServer
+}
+
+// New is a constructor for the object ACL checking service.
+func New(next object.ServiceServer,
+ nm netmap.Source,
+ irf InnerRingFetcher,
+ acl ACLChecker,
+ cs container.Source,
+ opts ...Option,
+) Service {
+ cfg := &cfg{
+ log: &logger.Logger{Logger: zap.L()},
+ next: next,
+ nm: nm,
+ irFetcher: irf,
+ checker: acl,
+ containers: cs,
+ }
+
+ for i := range opts {
+ opts[i](cfg)
+ }
+
+ return Service{
+ cfg: cfg,
+ c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
+ }
+}
+
+// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context.
+// This allows the next handler invocation to retrieve already calculated immutable request-specific values.
+type wrappedGetObjectStream struct {
+ object.GetObjectStream
+
+ requestInfo RequestInfo
+}
+
+func (w *wrappedGetObjectStream) Context() context.Context {
+ return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{
+ Namespace: w.requestInfo.ContainerNamespace(),
+ ContainerOwner: w.requestInfo.ContainerOwner(),
+ SenderKey: w.requestInfo.SenderKey(),
+ Role: w.requestInfo.RequestRole(),
+ SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
+ BearerToken: w.requestInfo.Bearer(),
+ })
+}
+
+func newWrappedGetObjectStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream {
+ return &wrappedGetObjectStream{
+ GetObjectStream: getObjectStream,
+ requestInfo: reqInfo,
+ }
+}
+
+// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context.
+// This allows the next handler invocation to retrieve already calculated immutable request-specific values.
+type wrappedRangeStream struct {
+ object.GetObjectRangeStream
+
+ requestInfo RequestInfo
+}
+
+func (w *wrappedRangeStream) Context() context.Context {
+ return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{
+ Namespace: w.requestInfo.ContainerNamespace(),
+ ContainerOwner: w.requestInfo.ContainerOwner(),
+ SenderKey: w.requestInfo.SenderKey(),
+ Role: w.requestInfo.RequestRole(),
+ SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
+ BearerToken: w.requestInfo.Bearer(),
+ })
+}
+
+func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream {
+ return &wrappedRangeStream{
+ GetObjectRangeStream: rangeStream,
+ requestInfo: reqInfo,
+ }
+}
+
+// wrappedSearchStream propagates RequestContext into SearchStream's context.
+// This allows the next handler invocation to retrieve already calculated immutable request-specific values.
+type wrappedSearchStream struct {
+ object.SearchStream
+
+ requestInfo RequestInfo
+}
+
+func (w *wrappedSearchStream) Context() context.Context {
+ return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{
+ Namespace: w.requestInfo.ContainerNamespace(),
+ ContainerOwner: w.requestInfo.ContainerOwner(),
+ SenderKey: w.requestInfo.SenderKey(),
+ Role: w.requestInfo.RequestRole(),
+ SoftAPECheck: w.requestInfo.IsSoftAPECheck(),
+ BearerToken: w.requestInfo.Bearer(),
+ })
+}
+
+func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream {
+ return &wrappedSearchStream{
+ SearchStream: searchStream,
+ requestInfo: reqInfo,
+ }
+}
+
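+// A downstream handler can read the propagated values back from the stream's
+// context (an illustrative sketch, assuming `ctx` comes from one of the
+// wrapped streams above):
+//
+//	if rc, ok := ctx.Value(object.RequestContextKey).(*object.RequestContext); ok {
+//		_ = rc.SoftAPECheck // e.g. choose between soft and hard APE checks
+//	}
+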
+// Get implements ServiceServer interface, makes ACL checks and calls
+// next Get method in the ServiceServer pipeline.
+func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.Get(request, &getStreamBasicChecker{
+ GetObjectStream: newWrappedGetObjectStream(stream, reqInfo),
+ info: reqInfo,
+ checker: b.checker,
+ })
+}
+
+func (b Service) Put() (object.PutObjectStream, error) {
+ streamer, err := b.next.Put()
+
+ return putStreamBasicChecker{
+ source: &b,
+ next: streamer,
+ }, err
+}
+
+func (b Service) Patch() (object.PatchObjectStream, error) {
+ streamer, err := b.next.Patch()
+
+ return &patchStreamBasicChecker{
+ source: &b,
+ next: streamer,
+ }, err
+}
+
+func (b Service) Head(
+ ctx context.Context,
+ request *objectV2.HeadRequest,
+) (*objectV2.HeadResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ resp, err := b.next.Head(requestContext(ctx, reqInfo), request)
+ if err == nil {
+ if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
+ err = eACLErr(reqInfo, err)
+ }
+ }
+
+ return resp, err
+}
+
+func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
+ id, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, id, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch)
+ if err != nil {
+ return err
+ }
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.Search(request, &searchStreamBasicChecker{
+ checker: b.checker,
+ SearchStream: newWrappedSearchStream(stream, reqInfo),
+ info: reqInfo,
+ })
+}
+
+func (b Service) Delete(
+ ctx context.Context,
+ request *objectV2.DeleteRequest,
+) (*objectV2.DeleteResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.Delete(requestContext(ctx, reqInfo), request)
+}
+
+func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.GetRange(request, &rangeStreamBasicChecker{
+ checker: b.checker,
+ GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo),
+ info: reqInfo,
+ })
+}
+
+func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context {
+ return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{
+ Namespace: reqInfo.ContainerNamespace(),
+ ContainerOwner: reqInfo.ContainerOwner(),
+ SenderKey: reqInfo.SenderKey(),
+ Role: reqInfo.RequestRole(),
+ SoftAPECheck: reqInfo.IsSoftAPECheck(),
+ BearerToken: reqInfo.Bearer(),
+ })
+}
+
+func (b Service) GetRangeHash(
+ ctx context.Context,
+ request *objectV2.GetRangeHashRequest,
+) (*objectV2.GetRangeHashResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.GetRangeHash(requestContext(ctx, reqInfo), request)
+}
+
+func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID()
+ if idV2 == nil {
+ return nil, errors.New("missing object owner")
+ }
+
+ var idOwner user.ID
+
+ err = idOwner.ReadFromV2(*idV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid object owner: %w", err)
+ }
+
+ obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID())
+ if err != nil {
+ return nil, err
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return nil, err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !b.checker.CheckBasicACL(reqInfo) || !b.checker.StickyBitCheck(reqInfo, idOwner) {
+ return nil, basicACLErr(reqInfo)
+ }
+ if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+ }
+
+ return b.next.PutSingle(requestContext(ctx, reqInfo), request)
+}
+
+func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
+ body := request.GetBody()
+ if body == nil {
+ return errEmptyBody
+ }
+
+ part := body.GetObjectPart()
+ if part, ok := part.(*objectV2.PutObjectPartInit); ok {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ idV2 := part.GetHeader().GetOwnerID()
+ if idV2 == nil {
+ return errors.New("missing object owner")
+ }
+
+ var idOwner user.ID
+
+ err = idOwner.ReadFromV2(*idV2)
+ if err != nil {
+ return fmt.Errorf("invalid object owner: %w", err)
+ }
+
+ objV2 := part.GetObjectID()
+ var obj *oid.ID
+
+ if objV2 != nil {
+ obj = new(oid.ID)
+
+ err = obj.ReadFromV2(*objV2)
+ if err != nil {
+ return err
+ }
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if reqInfo.IsSoftAPECheck() {
+ if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) {
+ return basicACLErr(reqInfo)
+ }
+ }
+
+ ctx = requestContext(ctx, reqInfo)
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+ var sTok *sessionSDK.Object
+
+ if tokV2 != nil {
+ sTok = new(sessionSDK.Object)
+
+ err := sTok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+ // if the session relates to the object's removal, we don't check
+ // the tombstone's relation to the session here, since the user
+ // can't predict the tombstone's ID.
+ err = assertSessionRelation(*sTok, cnr, nil)
+ } else {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return sTok, nil
+}
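+
+// A hedged usage sketch (assuming `cnr`, `obj` and `tokV2` were parsed from
+// the incoming request); for delete sessions the object relation is
+// intentionally not asserted, since the tombstone ID cannot be known when
+// the token is issued:
+//
+//	sTok, err := readSessionToken(cnr, obj, tokV2)
+//	if err != nil {
+//		return err // malformed session token
+//	}
+//	_ = sTok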
+
+func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
+ if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+ }
+
+ return g.GetObjectStream.Send(resp)
+}
+
+func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+
+ return g.GetObjectRangeStream.Send(resp)
+}
+
+func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+
+ return g.SearchStream.Send(resp)
+}
+
+func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+ body := request.GetBody()
+ if body == nil {
+ return errEmptyBody
+ }
+
+ if !p.nonFirstSend {
+ p.nonFirstSend = true
+
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ objV2 := request.GetBody().GetAddress().GetObjectID()
+ if objV2 == nil {
+ return errors.New("missing oid")
+ }
+ obj := new(oid.ID)
+ err = obj.ReadFromV2(*objV2)
+ if err != nil {
+ return err
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(req, cnr)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ ctx = requestContext(ctx, reqInfo)
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
+ cnr, err := b.containers.Get(idCnr) // fetch actual container
+ if err != nil {
+ return info, err
+ }
+
+ if req.token != nil {
+ currentEpoch, err := b.nm.Epoch()
+ if err != nil {
+ return info, errors.New("can't fetch current epoch")
+ }
+ if req.token.ExpiredAt(currentEpoch) {
+ return info, new(apistatus.SessionTokenExpired)
+ }
+ if req.token.InvalidAt(currentEpoch) {
+ return info, fmt.Errorf("%s: token is invalid at epoch %d",
+ invalidRequestMessage, currentEpoch)
+ }
+
+ if !assertVerb(*req.token, op) {
+ return info, errInvalidVerb
+ }
+ }
+
+ // find request role and key
+ ownerID, ownerKey, err := req.RequestOwner()
+ if err != nil {
+ return info, err
+ }
+ res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+ if err != nil {
+ return info, err
+ }
+
+ info.basicACL = cnr.Value.BasicACL()
+ info.requestRole = res.Role
+ info.operation = op
+ info.cnrOwner = cnr.Value.Owner()
+ info.idCnr = idCnr
+
+ cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+ if hasNamespace {
+ info.cnrNamespace = cnrNamespace
+ }
+
+ // the key is assumed to be valid at this point; otherwise
+ // the request would not have passed validation
+ info.senderKey = res.Key
+
+ // add the bearer token if it is present in the request
+ info.bearer = req.bearer
+
+ info.srcRequest = req.src
+
+ return info, nil
+}
+
+// findRequestInfoWithoutACLOperationAssert is findRequestInfo without the session token verb assertion.
+func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
+ cnr, err := b.containers.Get(idCnr) // fetch actual container
+ if err != nil {
+ return info, err
+ }
+
+ if req.token != nil {
+ currentEpoch, err := b.nm.Epoch()
+ if err != nil {
+ return info, errors.New("can't fetch current epoch")
+ }
+ if req.token.ExpiredAt(currentEpoch) {
+ return info, new(apistatus.SessionTokenExpired)
+ }
+ if req.token.InvalidAt(currentEpoch) {
+ return info, fmt.Errorf("%s: token is invalid at epoch %d",
+ invalidRequestMessage, currentEpoch)
+ }
+ }
+
+ // find request role and key
+ ownerID, ownerKey, err := req.RequestOwner()
+ if err != nil {
+ return info, err
+ }
+ res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+ if err != nil {
+ return info, err
+ }
+
+ info.basicACL = cnr.Value.BasicACL()
+ info.requestRole = res.Role
+ info.cnrOwner = cnr.Value.Owner()
+ info.idCnr = idCnr
+
+ cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+ if hasNamespace {
+ info.cnrNamespace = cnrNamespace
+ }
+
+ // the key is assumed to be valid at this point; otherwise
+ // the request would not have passed validation
+ info.senderKey = res.Key
+
+ // add the bearer token if it is present in the request
+ info.bearer = req.bearer
+
+ info.srcRequest = req.src
+
+ return info, nil
+}
diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go
new file mode 100644
index 000000000..061cd26b6
--- /dev/null
+++ b/pkg/services/object/acl/v2/types.go
@@ -0,0 +1,28 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+// ACLChecker is an interface that must provide
+// ACL-related checks.
+type ACLChecker interface {
+ // CheckBasicACL must return true only if the request
+ // passes basic ACL validation.
+ CheckBasicACL(RequestInfo) bool
+ // CheckEACL must return a non-nil error if the request
+ // doesn't pass extended ACL validation.
+ CheckEACL(any, RequestInfo) error
+ // StickyBitCheck must return true only if the sticky bit
+ // is disabled, or it is enabled and the request contains
+ // the correct owner field.
+ StickyBitCheck(RequestInfo, user.ID) bool
+}
+
+// InnerRingFetcher is an interface that must provide
+// Inner Ring information.
+type InnerRingFetcher interface {
+ // InnerRingKeys must return the list of public keys
+ // of the current Inner Ring.
+ InnerRingKeys() ([][]byte, error)
+}
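+
+// A minimal stub satisfying InnerRingFetcher, e.g. for tests (an illustrative
+// sketch, not part of the service):
+//
+//	type irStub struct{ keys [][]byte }
+//
+//	func (s irStub) InnerRingKeys() ([][]byte, error) { return s.keys, nil }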
diff --git a/pkg/services/object/ape/util.go b/pkg/services/object/acl/v2/util.go
similarity index 56%
rename from pkg/services/object/ape/util.go
rename to pkg/services/object/acl/v2/util.go
index 5cd2caa50..c5225e8c4 100644
--- a/pkg/services/object/ape/util.go
+++ b/pkg/services/object/acl/v2/util.go
@@ -1,4 +1,4 @@
-package ape
+package v2
import (
"crypto/ecdsa"
@@ -6,34 +6,57 @@ import (
"errors"
"fmt"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
- if cidV2 != nil {
- if err = cnrID.ReadFromV2(*cidV2); err != nil {
- return
+var errMissingContainerID = errors.New("missing container ID")
+
+func getContainerIDFromRequest(req any) (cid.ID, error) {
+ var idV2 *refsV2.ContainerID
+ var id cid.ID
+
+ switch v := req.(type) {
+ case *objectV2.GetRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.PutRequest:
+ part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
+ if !ok {
+ return cid.ID{}, errors.New("can't get container ID in chunk")
}
- } else {
- err = errMissingContainerID
- return
+
+ idV2 = part.GetHeader().GetContainerID()
+ case *objectV2.HeadRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.SearchRequest:
+ idV2 = v.GetBody().GetContainerID()
+ case *objectV2.DeleteRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.GetRangeRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.GetRangeHashRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.PutSingleRequest:
+ idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
+ case *objectV2.PatchRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ default:
+ return cid.ID{}, errors.New("unknown request type")
}
- if objV2 != nil {
- objID = new(oid.ID)
- if err = objID.ReadFromV2(*objV2); err != nil {
- return
- }
+ if idV2 == nil {
+ return cid.ID{}, errMissingContainerID
}
- return
+
+ return id, id.ReadFromV2(*idV2)
}
// originalBearerToken goes down to original request meta header and fetches
@@ -52,6 +75,50 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
return &tok, tok.ReadFromV2(*tokV2)
}
+// originalSessionToken goes down to the original request meta header and
+// fetches the session token from there.
+func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
+ for header.GetOrigin() != nil {
+ header = header.GetOrigin()
+ }
+
+ tokV2 := header.GetSessionToken()
+ if tokV2 == nil {
+ return nil, nil
+ }
+
+ var tok sessionSDK.Object
+
+ err := tok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ return &tok, nil
+}
+
+// getObjectIDFromRequestBody decodes an oid.ID from the address carried by the
+// request body. It returns an error if the object ID is missing from the request.
+func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
+ idV2 := body.GetAddress().GetObjectID()
+ return getObjectIDFromRefObjectID(idV2)
+}
+
+func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) {
+ if idV2 == nil {
+ return nil, errors.New("missing object ID")
+ }
+
+ var id oid.ID
+
+ err := id.ReadFromV2(*idV2)
+ if err != nil {
+ return nil, err
+ }
+
+ return &id, nil
+}
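+
+// A hedged usage sketch (assuming `req` is a request whose body carries an
+// address, e.g. *objectV2.GetRequest):
+//
+//	obj, err := getObjectIDFromRequestBody(req.GetBody())
+//	if err != nil {
+//		return err // missing or malformed object ID
+//	}
+//	_ = obj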
+
func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
// 1. First check signature of session token.
if !token.VerifySignature() {
@@ -105,16 +172,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-// assertVerb checks that token verb corresponds to the method.
-func assertVerb(tok sessionSDK.Object, method string) bool {
- switch method {
- case nativeschema.MethodPutObject:
+// assertVerb checks that the token verb corresponds to op.
+func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
+ switch op {
+ case acl.OpObjectPut:
return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
- case nativeschema.MethodDeleteObject:
+ case acl.OpObjectDelete:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
- case nativeschema.MethodGetObject:
+ case acl.OpObjectGet:
return tok.AssertVerb(sessionSDK.VerbObjectGet)
- case nativeschema.MethodHeadObject:
+ case acl.OpObjectHead:
return tok.AssertVerb(
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
@@ -123,15 +190,14 @@ func assertVerb(tok sessionSDK.Object, method string) bool {
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectPatch,
)
- case nativeschema.MethodSearchObject:
+ case acl.OpObjectSearch:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
- case nativeschema.MethodRangeObject:
+ case acl.OpObjectRange:
return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
- case nativeschema.MethodHashObject:
+ case acl.OpObjectHash:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
- case nativeschema.MethodPatchObject:
- return tok.AssertVerb(sessionSDK.VerbObjectPatch)
}
+
return false
}
@@ -155,15 +221,3 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error
return nil
}
-
-func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
- key, err := unmarshalPublicKey(rawKey)
- if err != nil {
- return nil, nil, fmt.Errorf("invalid signature key: %w", err)
- }
-
- var idSender user.ID
- user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
-
- return &idSender, key, nil
-}
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
new file mode 100644
index 000000000..435339683
--- /dev/null
+++ b/pkg/services/object/acl/v2/util_test.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
+ aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOriginalTokens(t *testing.T) {
+ sToken := sessiontest.ObjectSigned()
+ bToken := bearertest.Token()
+
+ pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, bToken.Sign(*pk))
+
+ var bTokenV2 acl.BearerToken
+ bToken.WriteToV2(&bTokenV2)
+ // This line is needed because the SDK uses a custom format for reserved
+ // filters, so `cid.ID` is not converted to a string immediately.
+ require.NoError(t, bToken.ReadFromV2(bTokenV2))
+
+ var sTokenV2 session.Token
+ sToken.WriteToV2(&sTokenV2)
+
+ for i := range 10 {
+ metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
+ res, err := originalSessionToken(metaHeaders)
+ require.NoError(t, err)
+ require.Equal(t, sToken, res, i)
+
+ bTok, err := originalBearerToken(metaHeaders)
+ require.NoError(t, err)
+ require.Equal(t, &bToken, bTok, i)
+ }
+}
+
+func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
+ metaHeader := new(session.RequestMetaHeader)
+ metaHeader.SetBearerToken(b)
+ metaHeader.SetSessionToken(s)
+
+ for i := uint32(0); i < depth; i++ {
+ link := metaHeader
+ metaHeader = new(session.RequestMetaHeader)
+ metaHeader.SetOrigin(link)
+ }
+
+ return metaHeader
+}
+
+func TestIsVerbCompatible(t *testing.T) {
+ // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
+ table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
+ aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
+ aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
+ aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet},
+ aclsdk.OpObjectHead: {
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ },
+ aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
+ aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash},
+ aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
+ }
+
+ verbs := []sessionSDK.ObjectVerb{
+ sessionSDK.VerbObjectPut,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectSearch,
+ }
+
+ var tok sessionSDK.Object
+
+ for op, list := range table {
+ for _, verb := range verbs {
+ var contains bool
+ for _, v := range list {
+ if v == verb {
+ contains = true
+ break
+ }
+ }
+
+ tok.ForVerb(verb)
+
+ require.Equal(t, contains, assertVerb(tok, op),
+ "%v in token, %s executing", verb, op)
+ }
+ }
+}
+
+func TestAssertSessionRelation(t *testing.T) {
+ var tok sessionSDK.Object
+ cnr := cidtest.ID()
+ cnrOther := cidtest.ID()
+ obj := oidtest.ID()
+ objOther := oidtest.ID()
+
+ // make sure the IDs differ, otherwise the test won't work correctly
+ require.False(t, cnrOther.Equals(cnr))
+ require.False(t, objOther.Equals(obj))
+
+ // bind session to the container (required)
+ tok.BindContainer(cnr)
+
+ // test container-global session
+ require.NoError(t, assertSessionRelation(tok, cnr, nil))
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnrOther, nil))
+ require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
+
+ // limit the session to the particular object
+ tok.LimitByObjects(obj)
+
+ // test fixed object session (here obj arg must be non-nil everywhere)
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnr, &objOther))
+}
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index bb6067a37..3688638d0 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -5,12 +5,12 @@ import (
"errors"
"fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -64,8 +64,11 @@ type Prm struct {
// An encoded container's owner user ID.
ContainerOwner user.ID
- // Attributes defined for the container.
- ContainerAttributes map[string]string
+ // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
+ SoftAPECheck bool
+
+ // If true, object headers will not be retrieved from the storage engine.
+ WithoutHeaderRequest bool
// The request's bearer token. It is used in order to check APE overrides with the token.
BearerToken *bearer.Token
@@ -79,10 +82,9 @@ var errMissingOID = errors.New("object ID is not set")
// CheckAPE prepares an APE-request and checks if it is permitted by policies.
func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
// APE check is ignored for some inter-node requests.
- switch prm.Role {
- case nativeschema.PropertyValueContainerRoleContainer:
+ if prm.Role == nativeschema.PropertyValueContainerRoleContainer {
return nil
- case nativeschema.PropertyValueContainerRoleIR:
+ } else if prm.Role == nativeschema.PropertyValueContainerRoleIR {
switch prm.Method {
case nativeschema.MethodGetObject,
nativeschema.MethodHeadObject,
@@ -103,12 +105,13 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
return err
}
- return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{
+ return c.checkerCore.CheckAPE(checkercore.CheckPrm{
Request: r,
PublicKey: pub,
- Namespace: prm.Namespace,
+ Namespace: prm.Namespace,
Container: prm.Container,
ContainerOwner: prm.ContainerOwner,
BearerToken: prm.BearerToken,
+ SoftAPECheck: prm.SoftAPECheck,
})
}
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
index 97eb2b2d7..090f6a83c 100644
--- a/pkg/services/object/ape/checker_test.go
+++ b/pkg/services/object/ape/checker_test.go
@@ -8,13 +8,13 @@ import (
"fmt"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
return pk.GetScriptHash()
}
-func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
+func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) {
v, ok := f.subjects[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160
return v, nil
}
-func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
+func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) {
v, ok := f.subjectsExtended[key]
if !ok {
return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
@@ -619,21 +619,21 @@ type netmapStub struct {
currentEpoch uint64
}
-func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
if diff >= s.currentEpoch {
return nil, errors.New("invalid diff")
}
- return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+ return s.GetNetMapByEpoch(s.currentEpoch - diff)
}
-func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
+func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, errors.New("netmap not found")
}
-func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
+func (s *netmapStub) Epoch() (uint64, error) {
return s.currentEpoch, nil
}
@@ -641,18 +641,18 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container
}
-func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
-func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
return nil, nil
}
-func TestGetECChunk(t *testing.T) {
+func TestPutECChunk(t *testing.T) {
headerProvider := newHeaderProviderMock()
frostfsidProvider := newFrostfsIDProviderMock(t)
@@ -666,10 +666,11 @@ func TestGetECChunk(t *testing.T) {
Rules: []chain.Rule{
{
Status: chain.AccessDenied,
- Actions: chain.Actions{Names: methodsRequiredOID},
+ Actions: chain.Actions{Names: methodsOptionalOID},
Resources: chain.Resources{
Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
},
+ Any: true,
Condition: []chain.Condition{
{
Op: chain.CondStringEquals,
@@ -679,32 +680,21 @@ func TestGetECChunk(t *testing.T) {
},
},
},
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- },
},
+ MatchType: chain.MatchTypeFirstMatch,
})
node1Key, err := keys.NewPrivateKey()
require.NoError(t, err)
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey(node1Key.PublicKey().Bytes())
- node2Key, err := keys.NewPrivateKey()
- require.NoError(t, err)
- node2 := netmapSDK.NodeInfo{}
- node2.SetPublicKey(node1Key.PublicKey().Bytes())
netmap := &netmapSDK.NetMap{}
netmap.SetEpoch(100)
- netmap.SetNodes([]netmapSDK.NodeInfo{node1, node2})
+ netmap.SetNodes([]netmapSDK.NodeInfo{node1})
nm := &netmapStub{
currentEpoch: 100,
netmaps: map[uint64]*netmapSDK.NetMap{
- 99: netmap,
100: netmap,
},
}
@@ -712,7 +702,7 @@ func TestGetECChunk(t *testing.T) {
cont := containerSDK.Container{}
cont.Init()
pp := netmapSDK.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("EC 1.1"))
+ require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
cs := &testContainerSource{
containers: map[cid.ID]*container.Container{
@@ -728,7 +718,7 @@ func TestGetECChunk(t *testing.T) {
chunkHeader := newHeaderObjectSDK(cnr, obj, nil).ToV2().GetHeader()
ecHeader := object.ECHeader{
Index: 1,
- Total: 2,
+ Total: 5,
Parent: &refs.ObjectID{},
}
chunkHeader.SetEC(&ecHeader)
@@ -747,33 +737,32 @@ func TestGetECChunk(t *testing.T) {
})
headerProvider.addHeader(cnr, ecParentID, parentHeader)
- // container node requests EC parent headers, so container node denies access by matching attribute key/value
- t.Run("access denied on container node", func(t *testing.T) {
+ t.Run("access denied for container node", func(t *testing.T) {
prm := Prm{
- Method: nativeschema.MethodGetObject,
- Container: cnr,
- Object: obj,
- Role: role,
- SenderKey: hex.EncodeToString(node2Key.PublicKey().Bytes()),
- Header: chunkHeader,
+ Method: nativeschema.MethodPutObject,
+ Container: cnr,
+ Object: obj,
+ Role: role,
+ SenderKey: senderKey,
+ Header: chunkHeader,
+ SoftAPECheck: true,
}
err = checker.CheckAPE(context.Background(), prm)
require.Error(t, err)
})
-
- // non container node has no access rights to collect EC parent header, so it uses EC chunk headers
- t.Run("access allowed on non container node", func(t *testing.T) {
+ t.Run("access allowed for non container node", func(t *testing.T) {
otherKey, err := keys.NewPrivateKey()
require.NoError(t, err)
checker = NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, otherKey.PublicKey().Bytes())
prm := Prm{
- Method: nativeschema.MethodGetObject,
- Container: cnr,
- Object: obj,
- Role: nativeschema.PropertyValueContainerRoleOthers,
- SenderKey: senderKey,
- Header: chunkHeader,
+ Method: nativeschema.MethodPutObject,
+ Container: cnr,
+ Object: obj,
+ Role: nativeschema.PropertyValueContainerRoleOthers,
+ SenderKey: senderKey,
+ Header: chunkHeader,
+ SoftAPECheck: true,
}
err = checker.CheckAPE(context.Background(), prm)
diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
index 82e660a7f..1b2024ed5 100644
--- a/pkg/services/object/ape/errors.go
+++ b/pkg/services/object/ape/errors.go
@@ -1,34 +1,10 @@
package ape
import (
- "errors"
-
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
-var (
- errMissingContainerID = malformedRequestError("missing container ID")
- errEmptyVerificationHeader = malformedRequestError("empty verification header")
- errEmptyBodySig = malformedRequestError("empty at body signature")
- errInvalidSessionSig = malformedRequestError("invalid session token signature")
- errInvalidSessionOwner = malformedRequestError("invalid session token owner")
- errInvalidVerb = malformedRequestError("session token verb is invalid")
-)
-
-func malformedRequestError(reason string) error {
- invalidArgErr := &apistatus.InvalidArgument{}
- invalidArgErr.SetMessage(reason)
- return invalidArgErr
-}
-
func toStatusErr(err error) error {
- var chRouterErr *checkercore.ChainRouterError
- if !errors.As(err, &chRouterErr) {
- errServerInternal := &apistatus.ServerInternal{}
- apistatus.WriteInternalServerErr(errServerInternal, err)
- return errServerInternal
- }
errAccessDenied := &apistatus.ObjectAccessDenied{}
errAccessDenied.WriteReason("ape denied request: " + err.Error())
return errAccessDenied
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
deleted file mode 100644
index 102985aa6..000000000
--- a/pkg/services/object/ape/metadata.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package ape
-
-import (
- "context"
- "encoding/hex"
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-type Metadata struct {
- Container cid.ID
- Object *oid.ID
- MetaHeader *session.RequestMetaHeader
- VerificationHeader *session.RequestVerificationHeader
- SessionToken *sessionSDK.Object
- BearerToken *bearer.Token
-}
-
-func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
- if m.VerificationHeader == nil {
- return nil, nil, errEmptyVerificationHeader
- }
-
- if m.BearerToken != nil && m.BearerToken.Impersonate() {
- return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
- }
-
- // if session token is presented, use it as truth source
- if m.SessionToken != nil {
- // verify signature of session token
- return ownerFromToken(m.SessionToken)
- }
-
- // otherwise get original body signature
- bodySignature := originalBodySignature(m.VerificationHeader)
- if bodySignature == nil {
- return nil, nil, errEmptyBodySig
- }
-
- return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
-}
-
-// RequestInfo contains request information extracted by request metadata.
-type RequestInfo struct {
- // Role defines under which role this request is executed.
- // It must be represented only as a constant represented in native schema.
- Role string
-
- ContainerOwner user.ID
-
- ContainerAttributes map[string]string
-
- // Namespace defines to which namespace a container is belonged.
- Namespace string
-
- // HEX-encoded sender key.
- SenderKey string
-}
-
-type RequestInfoExtractor interface {
- GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
-}
-
-type extractor struct {
- containers container.Source
-
- nm netmap.Source
-
- classifier objectCore.SenderClassifier
-}
-
-func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
- return &extractor{
- containers: containers,
- nm: nm,
- classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
- }
-}
-
-func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
- currentEpoch, err := e.nm.Epoch(ctx)
- if err != nil {
- return errors.New("can't fetch current epoch")
- }
- if sessionToken.ExpiredAt(currentEpoch) {
- return new(apistatus.SessionTokenExpired)
- }
- if sessionToken.InvalidAt(currentEpoch) {
- return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch)
- }
- if !assertVerb(*sessionToken, method) {
- return errInvalidVerb
- }
- return nil
-}
-
-func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
- cnr, err := e.containers.Get(ctx, m.Container)
- if err != nil {
- return ri, err
- }
-
- if m.SessionToken != nil {
- if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
- return ri, err
- }
- }
-
- ownerID, ownerKey, err := m.RequestOwner()
- if err != nil {
- return ri, err
- }
- res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
- if err != nil {
- return ri, err
- }
-
- ri.Role = nativeSchemaRole(res.Role)
- ri.ContainerOwner = cnr.Value.Owner()
-
- ri.ContainerAttributes = map[string]string{}
- for key, val := range cnr.Value.Attributes() {
- ri.ContainerAttributes[key] = val
- }
-
- cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
- if hasNamespace {
- ri.Namespace = cnrNamespace
- }
-
- // it is assumed that at the moment the key will be valid,
- // otherwise the request would not pass validation
- ri.SenderKey = hex.EncodeToString(res.Key)
-
- return ri, nil
-}
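
GetRequestInfo derives the namespace by stripping a ".ns" suffix from the container's domain zone. The same extraction in isolation, with the ".ns" convention taken from the code above:

package main

import (
	"fmt"
	"strings"
)

// namespaceFromZone mirrors the extraction in GetRequestInfo: a container
// domain zone of the form "<namespace>.ns" yields the namespace; any
// other zone leaves the namespace empty (the root namespace).
func namespaceFromZone(zone string) string {
	if ns, ok := strings.CutSuffix(zone, ".ns"); ok {
		return ns
	}
	return ""
}

func main() {
	fmt.Println(namespaceFromZone("tenant-a.ns")) // tenant-a
	fmt.Println(namespaceFromZone("container"))   // empty: root namespace
}
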
-
-func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
- var sTok *sessionSDK.Object
-
- if tokV2 != nil {
- sTok = new(sessionSDK.Object)
-
- err := sTok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
- // if the session relates to the object's removal, we don't check
- // the relation of the tombstone to the session here, since the user
- // can't predict the tombstone's ID.
- err = assertSessionRelation(*sTok, cnr, nil)
- } else {
- err = assertSessionRelation(*sTok, cnr, obj)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- return sTok, nil
-}
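
readSessionToken special-cases DELETE: the object ID is deliberately excluded from the session-relation check because a client cannot know the tombstone's ID when it issues the session. A sketch of that branch with hypothetical helper names:

package main

import (
	"errors"
	"fmt"
)

// assertRelation is a hypothetical stand-in for assertSessionRelation:
// a nil object pointer means "do not check the object binding".
func assertRelation(tokenCnr, cnr string, tokenObj, obj *string) error {
	if tokenCnr != cnr {
		return errors.New("session relates to a different container")
	}
	if tokenObj != nil && obj != nil && *tokenObj != *obj {
		return errors.New("session relates to a different object")
	}
	return nil
}

// checkSession sketches readSessionToken's branch: for DELETE the object
// argument is dropped, since the tombstone ID is unknowable in advance.
func checkSession(verb, tokenCnr, cnr string, tokenObj, obj *string) error {
	if verb == "DELETE" {
		return assertRelation(tokenCnr, cnr, tokenObj, nil)
	}
	return assertRelation(tokenCnr, cnr, tokenObj, obj)
}

func main() {
	tomb := "tombstone-id"
	fmt.Println(checkSession("DELETE", "cnr", "cnr", nil, &tomb)) // <nil>: tombstone ID ignored
}
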
diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go
index 39dd7f476..da5307ca7 100644
--- a/pkg/services/object/ape/request.go
+++ b/pkg/services/object/ape/request.go
@@ -3,16 +3,14 @@ package ape
import (
"context"
"crypto/sha256"
- "errors"
"fmt"
"net"
"strconv"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -26,8 +24,6 @@ import (
var defaultRequest = aperequest.Request{}
-var errECMissingParentObjectID = errors.New("missing EC parent object ID")
-
func nativeSchemaRole(role acl.Role) string {
switch role {
case acl.RoleOwner:
@@ -57,16 +53,11 @@ func resourceName(cid cid.ID, oid *oid.ID, namespace string) string {
}
// objectProperties collects object properties from address parameters and a header, if one is passed.
-func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string {
+func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string {
objectProps := map[string]string{
nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
}
- for attrName, attrValue := range cnrAttrs {
- prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName)
- objectProps[prop] = attrValue
- }
-
objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString()
if oid != nil {
@@ -125,16 +116,13 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
var header *objectV2.Header
if prm.Header != nil {
header = prm.Header
- } else if prm.Object != nil {
+ } else if prm.Object != nil && !prm.WithoutHeaderRequest {
headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object, true)
if err == nil {
header = headerObjSDK.ToV2().GetHeader()
}
}
- header, err := c.fillHeaderWithECParent(ctx, prm, header)
- if err != nil {
- return defaultRequest, fmt.Errorf("get EC parent header: %w", err)
- }
+ header = c.fillHeaderWithECParent(ctx, prm, header)
reqProps := map[string]string{
nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
nativeschema.PropertyKeyActorRole: prm.Role,
@@ -145,7 +133,8 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
reqProps[xheadKey] = xhead.GetValue()
}
- reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm)
+ var err error
+ reqProps, err = c.fillWithUserClaimTags(reqProps, prm)
if err != nil {
return defaultRequest, err
}
@@ -160,58 +149,50 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
prm.Method,
aperequest.NewResource(
resourceName(prm.Container, prm.Object, prm.Namespace),
- objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header),
+ objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
),
reqProps,
), nil
}
-func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) (*objectV2.Header, error) {
+func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) *objectV2.Header {
if header == nil {
- return header, nil
+ return header
}
if header.GetEC() == nil {
- return header, nil
+ return header
+ }
+ if prm.Role == nativeschema.PropertyValueContainerRoleContainer ||
+ prm.Role == nativeschema.PropertyValueContainerRoleIR {
+ return header
}
parentObjRefID := header.GetEC().Parent
if parentObjRefID == nil {
- return nil, errECMissingParentObjectID
+ return header
}
var parentObjID oid.ID
if err := parentObjID.ReadFromV2(*parentObjRefID); err != nil {
- return nil, fmt.Errorf("EC parent object ID format error: %w", err)
+ return header
}
- // only container nodes have access to collect the parent object
- contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container)
- if err != nil {
- return nil, fmt.Errorf("check container node status: %w", err)
- }
- if !contNode {
- return header, nil
+ contNode, err := c.currentNodeIsContainerNode(prm.Container)
+ if err != nil || !contNode {
+ return header
}
parentObj, err := c.headerProvider.GetHeader(ctx, prm.Container, parentObjID, false)
if err != nil {
- if isLogicalError(err) {
- return header, nil
- }
- return nil, fmt.Errorf("EC parent header request: %w", err)
+ return header
}
- return parentObj.ToV2().GetHeader(), nil
+ return parentObj.ToV2().GetHeader()
}
-func isLogicalError(err error) bool {
- var errObjRemoved *apistatus.ObjectAlreadyRemoved
- var errObjNotFound *apistatus.ObjectNotFound
- return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound)
-}
-
-func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) {
- cnr, err := c.cnrSource.Get(ctx, cnrID)
+func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
+ cnr, err := c.cnrSource.Get(cnrID)
if err != nil {
return false, err
}
- nm, err := netmap.GetLatestNetworkMap(ctx, c.nm)
+ nm, err := netmap.GetLatestNetworkMap(c.nm)
if err != nil {
return false, err
}
@@ -225,7 +206,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.
return true, nil
}
- nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
+ nm, err = netmap.GetPreviousNetworkMap(c.nm)
if err != nil {
return false, err
}
@@ -234,7 +215,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) {
+func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
@@ -242,7 +223,7 @@ func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[st
if err != nil {
return nil, err
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk)
+ props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk)
if err != nil {
return reqProps, err
}
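
The hunk above removes container attributes from objectProperties. For context, this is how such attributes are folded into the flat property map the policy engine consumes; the key format string below is illustrative only, not the real nativeschema constant.

package main

import "fmt"

// Illustrative stand-in for nativeschema.PropertyKeyFormatObjectContainerAttribute.
const attrKeyFormat = "native:object/containerAttribute/%s"

// objectProps sketches how container attributes become flat properties.
func objectProps(cnrID string, cnrAttrs map[string]string) map[string]string {
	props := map[string]string{"native:object/containerID": cnrID}
	for name, value := range cnrAttrs {
		props[fmt.Sprintf(attrKeyFormat, name)] = value
	}
	return props
}

func main() {
	props := objectProps("cnr-1", map[string]string{"Zone": "eggplant"})
	fmt.Println(props["native:object/containerAttribute/Zone"]) // eggplant
}
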
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
index fcf7c4c40..9dad69d17 100644
--- a/pkg/services/object/ape/request_test.go
+++ b/pkg/services/object/ape/request_test.go
@@ -6,9 +6,8 @@ import (
"net"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -20,20 +19,11 @@ import (
)
const (
- testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y"
+ testOwnerID = "FPPtmAi9TCX329"
incomingIP = "192.92.33.1"
-
- testSysAttrName = "unittest"
-
- testSysAttrZone = "eggplant"
)
-var containerAttrs = map[string]string{
- cnrV2.SysAttributeName: testSysAttrName,
- cnrV2.SysAttributeZone: testSysAttrZone,
-}
-
func ctxWithPeerInfo() context.Context {
return peer.NewContext(context.Background(), &peer.Peer{
Addr: &net.TCPAddr{
@@ -115,7 +105,7 @@ func TestObjectProperties(t *testing.T) {
var testCnrOwner user.ID
require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
- props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader())
+ props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader())
require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID])
require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID])
@@ -134,8 +124,6 @@ func TestObjectProperties(t *testing.T) {
require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType])
require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash])
require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash])
- require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)])
- require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)])
for _, attr := range test.header.attributes {
require.Equal(t, attr.val, props[attr.key])
@@ -257,10 +245,6 @@ func TestNewAPERequest(t *testing.T) {
Role: role,
SenderKey: senderKey,
ContainerOwner: testCnrOwner,
- ContainerAttributes: map[string]string{
- cnrV2.SysAttributeZone: testSysAttrZone,
- cnrV2.SysAttributeName: testSysAttrName,
- },
}
headerSource := newHeaderProviderMock()
@@ -293,7 +277,7 @@ func TestNewAPERequest(t *testing.T) {
method,
aperequest.NewResource(
resourceName(cnr, obj, prm.Namespace),
- objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header {
+ objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
if headerObjSDK != nil {
return headerObjSDK.ToV2().GetHeader()
}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index 5e04843f3..a1634e7c5 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -2,24 +2,31 @@ package ape
import (
"context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)
-type Service struct {
- apeChecker Checker
+var errFailedToCastToRequestContext = errors.New("failed to cast to RequestContext")
- extractor RequestInfoExtractor
+type Service struct {
+ log *logger.Logger
+
+ apeChecker Checker
next objectSvc.ServiceServer
}
@@ -60,10 +67,10 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
-func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
+func NewService(log *logger.Logger, apeChecker Checker, next objectSvc.ServiceServer) *Service {
return &Service{
+ log: log,
apeChecker: apeChecker,
- extractor: extractor,
next: next,
}
}
@@ -73,9 +80,17 @@ type getStreamBasicChecker struct {
apeChecker Checker
- metadata Metadata
+ namespace string
- reqInfo RequestInfo
+ senderKey []byte
+
+ containerOwner user.ID
+
+ role string
+
+ softAPECheck bool
+
+ bearerToken *bearer.Token
}
func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
@@ -86,17 +101,17 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
}
prm := Prm{
- Namespace: g.reqInfo.Namespace,
- Container: cnrID,
- Object: objID,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodGetObject,
- SenderKey: g.reqInfo.SenderKey,
- ContainerOwner: g.reqInfo.ContainerOwner,
- ContainerAttributes: g.reqInfo.ContainerAttributes,
- Role: g.reqInfo.Role,
- BearerToken: g.metadata.BearerToken,
- XHeaders: resp.GetMetaHeader().GetXHeaders(),
+ Namespace: g.namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodGetObject,
+ SenderKey: hex.EncodeToString(g.senderKey),
+ ContainerOwner: g.containerOwner,
+ Role: g.role,
+ SoftAPECheck: g.softAPECheck,
+ BearerToken: g.bearerToken,
+ XHeaders: resp.GetMetaHeader().GetXHeaders(),
}
if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil {
@@ -106,54 +121,66 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
return g.GetObjectStream.Send(resp)
}
+func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) {
+ untyped := ctx.Value(objectSvc.RequestContextKey)
+ if untyped == nil {
+ return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey)
+ }
+ rc, ok := untyped.(*objectSvc.RequestContext)
+ if !ok {
+ return nil, errFailedToCastToRequestContext
+ }
+ return rc, nil
+}
+
func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ reqCtx, err := requestContext(stream.Context())
if err != nil {
- return err
- }
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
- if err != nil {
- return err
+ return toStatusErr(err)
}
+
return c.next.Get(request, &getStreamBasicChecker{
GetObjectStream: stream,
apeChecker: c.apeChecker,
- metadata: md,
- reqInfo: reqInfo,
+ namespace: reqCtx.Namespace,
+ senderKey: reqCtx.SenderKey,
+ containerOwner: reqCtx.ContainerOwner,
+ role: nativeSchemaRole(reqCtx.Role),
+ softAPECheck: reqCtx.SoftAPECheck,
+ bearerToken: reqCtx.BearerToken,
})
}
type putStreamBasicChecker struct {
apeChecker Checker
- extractor RequestInfoExtractor
-
next objectSvc.PutObjectStream
}
func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
+ reqCtx, err := requestContext(ctx)
if err != nil {
- return err
+ return toStatusErr(err)
}
- reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
+
+ cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
if err != nil {
- return err
+ return toStatusErr(err)
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodPutObject,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- Role: reqInfo.Role,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -168,12 +195,11 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
- streamer, err := c.next.Put(ctx)
+func (c *Service) Put() (objectSvc.PutObjectStream, error) {
+ streamer, err := c.next.Put()
return &putStreamBasicChecker{
apeChecker: c.apeChecker,
- extractor: c.extractor,
next: streamer,
}, err
}
@@ -181,8 +207,6 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
type patchStreamBasicChecker struct {
apeChecker Checker
- extractor RequestInfoExtractor
-
next objectSvc.PatchObjectStream
nonFirstSend bool
@@ -192,26 +216,27 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa
if !p.nonFirstSend {
p.nonFirstSend = true
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ reqCtx, err := requestContext(ctx)
if err != nil {
- return err
+ return toStatusErr(err)
}
- reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
+
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return err
+ return toStatusErr(err)
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodPatchObject,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- Role: reqInfo.Role,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodPatchObject,
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -226,22 +251,22 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa
return p.next.CloseAndRecv(ctx)
}
-func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
- streamer, err := c.next.Patch(ctx)
+func (c *Service) Patch() (objectSvc.PatchObjectStream, error) {
+ streamer, err := c.next.Patch()
return &patchStreamBasicChecker{
apeChecker: c.apeChecker,
- extractor: c.extractor,
next: streamer,
}, err
}
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
@@ -255,7 +280,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
switch headerPart := resp.GetBody().GetHeaderPart().(type) {
case *objectV2.ShortHeader:
cidV2 := new(refs.ContainerID)
- md.Container.WriteToV2(cidV2)
+ cnrID.WriteToV2(cidV2)
header.SetContainerID(cidV2)
header.SetVersion(headerPart.GetVersion())
header.SetCreationEpoch(headerPart.GetCreationEpoch())
@@ -271,17 +296,17 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: header,
- Method: nativeschema.MethodHeadObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: header,
+ Method: nativeschema.MethodHeadObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -290,25 +315,28 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
- md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
- if err != nil {
- return err
+ var cnrID cid.ID
+ if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil {
+ if err := cnrID.ReadFromV2(*cnrV2); err != nil {
+ return toStatusErr(err)
+ }
}
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
+
+ reqCtx, err := requestContext(stream.Context())
if err != nil {
- return err
+ return toStatusErr(err)
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Method: nativeschema.MethodSearchObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Method: nativeschema.MethodSearchObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -318,26 +346,27 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc
}
func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodDeleteObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodDeleteObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@@ -352,26 +381,27 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (
}
func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
- return err
+ return toStatusErr(err)
}
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
+
+ reqCtx, err := requestContext(stream.Context())
if err != nil {
- return err
+ return toStatusErr(err)
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodRangeObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodRangeObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@@ -381,26 +411,27 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G
}
func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodHashObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodHashObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
resp, err := c.next.GetRangeHash(ctx, request)
@@ -415,27 +446,28 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa
}
func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
if err != nil {
return nil, err
}
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
+
+ reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: request.GetBody().GetObject().GetHeader(),
- Method: nativeschema.MethodPutObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- ContainerAttributes: reqInfo.ContainerAttributes,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: request.GetBody().GetObject().GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
}
if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -445,36 +477,18 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ
return c.next.PutSingle(ctx, request)
}
-type request interface {
- GetMetaHeader() *session.RequestMetaHeader
- GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
- meta := request.GetMetaHeader()
- for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
- meta = origin
+func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
+ if cidV2 != nil {
+ if err = cnrID.ReadFromV2(*cidV2); err != nil {
+ return
+ }
}
- cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
- if err != nil {
- return
- }
- session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
- if err != nil {
- return
- }
- bearer, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return
- }
-
- md = Metadata{
- Container: cnrID,
- Object: objID,
- VerificationHeader: request.GetVerificationHeader(),
- SessionToken: session,
- BearerToken: bearer,
+ if objV2 != nil {
+ objID = new(oid.ID)
+ if err = objID.ReadFromV2(*objV2); err != nil {
+ return
+ }
}
return
}
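
The requestContext helper reintroduced above is the standard typed context-value pattern: a missing key and a value of the wrong dynamic type are reported as distinct errors. A minimal sketch with hypothetical key and struct types standing in for objectSvc.RequestContextKey and RequestContext:

package main

import (
	"context"
	"errors"
	"fmt"
)

type requestContextKey struct{}

type requestContext struct {
	Namespace string
	Role      string
}

var errFailedCast = errors.New("failed to cast to RequestContext")

// fromContext mirrors the requestContext helper: missing key and wrong
// dynamic type produce distinct errors.
func fromContext(ctx context.Context) (*requestContext, error) {
	untyped := ctx.Value(requestContextKey{})
	if untyped == nil {
		return nil, errors.New("no request context key in context")
	}
	rc, ok := untyped.(*requestContext)
	if !ok {
		return nil, errFailedCast
	}
	return rc, nil
}

func main() {
	ctx := context.WithValue(context.Background(), requestContextKey{}, &requestContext{Namespace: "ns", Role: "owner"})
	rc, err := fromContext(ctx)
	fmt.Println(rc.Namespace, rc.Role, err) // ns owner <nil>
}
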
diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go
index 97dbfa658..46e55360d 100644
--- a/pkg/services/object/ape/types.go
+++ b/pkg/services/object/ape/types.go
@@ -7,11 +7,3 @@ import "context"
type Checker interface {
CheckAPE(context.Context, Prm) error
}
-
-// InnerRingFetcher is an interface that must provide
-// Inner Ring information.
-type InnerRingFetcher interface {
- // InnerRingKeys must return list of public keys of
- // the actual inner ring.
- InnerRingKeys(ctx context.Context) ([][]byte, error)
-}
diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go
deleted file mode 100644
index 916bce427..000000000
--- a/pkg/services/object/ape/util_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package ape
-
-import (
- "slices"
- "testing"
-
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/stretchr/testify/require"
-)
-
-func TestIsVerbCompatible(t *testing.T) {
- table := map[string][]sessionSDK.ObjectVerb{
- nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
- nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
- nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet},
- nativeschema.MethodHeadObject: {
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectPatch,
- },
- nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
- nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash},
- nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
- nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch},
- }
-
- verbs := []sessionSDK.ObjectVerb{
- sessionSDK.VerbObjectPut,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectSearch,
- sessionSDK.VerbObjectPatch,
- }
-
- var tok sessionSDK.Object
-
- for op, list := range table {
- for _, verb := range verbs {
- contains := slices.Contains(list, verb)
-
- tok.ForVerb(verb)
-
- require.Equal(t, contains, assertVerb(tok, op),
- "%v in token, %s executing", verb, op)
- }
- }
-}
-
-func TestAssertSessionRelation(t *testing.T) {
- var tok sessionSDK.Object
- cnr := cidtest.ID()
- cnrOther := cidtest.ID()
- obj := oidtest.ID()
- objOther := oidtest.ID()
-
- // make sure the IDs differ, otherwise the test won't work correctly
- require.False(t, cnrOther.Equals(cnr))
- require.False(t, objOther.Equals(obj))
-
- // bind session to the container (required)
- tok.BindContainer(cnr)
-
- // test container-global session
- require.NoError(t, assertSessionRelation(tok, cnr, nil))
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnrOther, nil))
- require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
-
- // limit the session to the particular object
- tok.LimitByObjects(obj)
-
- // test fixed object session (here obj arg must be non-nil everywhere)
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnr, &objOther))
-}
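
The deleted test encodes which session verbs authorize which methods, e.g. a GET token also allows HEAD, but not the reverse. A compact sketch of such a verb-compatibility check; the verb values and method names below are illustrative stand-ins for sessionSDK.ObjectVerb and the nativeschema constants.

package main

import "fmt"

type verb string

const (
	verbGet       verb = "GET"
	verbHead      verb = "HEAD"
	verbDelete    verb = "DELETE"
	verbRangeHash verb = "RANGEHASH"
)

// compatible maps a method to the session verbs that authorize it,
// mirroring the table exercised by TestIsVerbCompatible.
var compatible = map[string][]verb{
	"HeadObject": {verbHead, verbGet, verbDelete, verbRangeHash},
	"GetObject":  {verbGet},
}

// assertVerb reports whether a token issued for v may execute method.
func assertVerb(v verb, method string) bool {
	for _, allowed := range compatible[method] {
		if allowed == v {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(assertVerb(verbGet, "HeadObject")) // true: a GET token also allows HEAD
	fmt.Println(assertVerb(verbHead, "GetObject")) // false: a HEAD token is weaker
}
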
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index f8ee089fe..39e1f9f2d 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -5,12 +5,12 @@ import (
"errors"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -37,7 +37,7 @@ func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*
if !a.enabled.Load() {
return res, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return res, err
}
@@ -48,7 +48,7 @@ func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error
if !a.enabled.Load() {
return err
}
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -59,7 +59,7 @@ func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRan
if !a.enabled.Load() {
return err
}
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return err
}
@@ -70,7 +70,7 @@ func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHas
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
@@ -81,19 +81,19 @@ func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*obje
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
return resp, err
}
// Put implements ServiceServer.
-func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) {
- res, err := a.next.Put(ctx)
+func (a *auditService) Put() (PutObjectStream, error) {
+ res, err := a.next.Put()
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
return res, err
}
return &auditPutStream{
@@ -108,7 +108,7 @@ func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleReque
if !a.enabled.Load() {
return resp, err
}
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(),
req.GetBody().GetObject().GetObjectID()),
err == nil)
@@ -121,7 +121,7 @@ func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) er
if !a.enabled.Load() {
return err
}
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return err
}
@@ -145,7 +145,7 @@ func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse,
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -163,8 +163,8 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error
if err != nil {
a.failed = true
}
- if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
@@ -183,13 +183,13 @@ type auditPatchStream struct {
nonFirstSend bool
}
-func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) {
- res, err := a.next.Patch(ctx)
+func (a *auditService) Patch() (PatchObjectStream, error) {
+ res, err := a.next.Patch()
if !a.enabled.Load() {
return res, err
}
if err != nil {
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
return res, err
}
return &auditPatchStream{
@@ -205,7 +205,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
a.failed = true
}
a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
return resp, err
@@ -224,8 +224,8 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e
if err != nil {
a.failed = true
}
- if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
}
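
Every handler in audit.go follows the same shape: call the next service, consult an atomic.Bool, and log only when auditing is enabled, after the outcome is known. A minimal sketch of that decorator, with next standing in for the wrapped ServiceServer call:

package main

import (
	"fmt"
	"sync/atomic"
)

func next() error { return nil }

// auditService sketches the decorator: the flag is read atomically per
// call, so auditing can be toggled at runtime without a restart.
type auditService struct {
	enabled atomic.Bool
}

func (a *auditService) Delete() error {
	err := next()
	if !a.enabled.Load() {
		return err
	}
	// log after the call, when success or failure is known
	fmt.Printf("audit: Delete success=%t\n", err == nil)
	return err
}

func main() {
	svc := &auditService{}
	_ = svc.Delete() // no audit record
	svc.enabled.Store(true)
	_ = svc.Delete() // audit record emitted
}
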
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index ef65e78bc..f48cc5b3d 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -3,7 +3,7 @@ package object
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
@@ -40,20 +40,20 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
return x.nextHandler.Get(req, stream)
}
-func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
+func (x *Common) Put() (PutObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Put(ctx)
+ return x.nextHandler.Put()
}
-func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) {
+func (x *Common) Patch() (PatchObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Patch(ctx)
+ return x.nextHandler.Patch()
}
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
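
Common gates each handler on node state: when the node is under maintenance, requests are refused with a status error before they reach the next handler. Sketched below with stand-in types for the state dependency and the status error:

package main

import (
	"errors"
	"fmt"
)

type nodeState struct{ maintenance bool }

func (s nodeState) IsMaintenance() bool { return s.maintenance }

var errNodeUnderMaintenance = errors.New("node under maintenance")

// common sketches the gate: every handler first checks maintenance mode
// and refuses before touching the next handler.
type common struct {
	state nodeState
	next  func() error
}

func (c common) Put() error {
	if c.state.IsMaintenance() {
		return errNodeUnderMaintenance
	}
	return c.next()
}

func main() {
	svc := common{state: nodeState{maintenance: true}, next: func() error { return nil }}
	fmt.Println(svc.Put()) // node under maintenance
}
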
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index f2bd907db..980c4c6bd 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -1,7 +1,6 @@
package target
import (
- "context"
"errors"
"fmt"
@@ -14,20 +13,20 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func New(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
// prepare needed put parameters
- if err := preparePrm(ctx, &prm); err != nil {
+ if err := preparePrm(prm); err != nil {
return nil, fmt.Errorf("could not prepare put parameters: %w", err)
}
if prm.Header.Signature() != nil {
- return newUntrustedTarget(ctx, &prm)
+ return newUntrustedTarget(prm)
}
- return newTrustedTarget(ctx, &prm)
+ return newTrustedTarget(prm)
}
-func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
+func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -49,9 +48,8 @@ func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transfor
}, nil
}
-func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- prm.Relay = nil // do not relay request without signature
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
+func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
}
@@ -87,10 +85,12 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme
user.IDFromKey(&ownerSession, key.PublicKey)
if !ownerObj.Equals(ownerSession) {
- return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
+ return nil, errors.New("session token is missing but object owner id is different from the default key")
+ }
+ } else {
+ if !ownerObj.Equals(sessionInfo.Owner) {
+ return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
}
- } else if !ownerObj.Equals(sessionInfo.Owner) {
- return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
}
if prm.SignRequestPrivateKey == nil {
@@ -110,11 +110,11 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme
}, nil
}
-func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
+func preparePrm(prm *objectwriter.Params) error {
var err error
// get latest network map
- nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource)
+ nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
if err != nil {
return fmt.Errorf("could not get latest network map: %w", err)
}
@@ -125,7 +125,7 @@ func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
}
// get container to store the object
- cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr)
+ cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
if err != nil {
return fmt.Errorf("could not get container by ID: %w", err)
}
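
newTrustedTarget validates ownership two ways: without a session token the object owner must match the owner derived from the node's default key, and with one it must match the token issuer. A sketch of that branch, using a string stand-in for user.ID:

package main

import (
	"errors"
	"fmt"
)

type owner string

// checkOwner sketches the trusted-target validation from the hunk above.
func checkOwner(objOwner owner, sessionIssuer *owner, defaultKeyOwner owner) error {
	if sessionIssuer == nil {
		if objOwner != defaultKeyOwner {
			return errors.New("session token is missing but object owner differs from the default key")
		}
		return nil
	}
	if objOwner != *sessionIssuer {
		return fmt.Errorf("different token issuer and object owner identifiers %s/%s", *sessionIssuer, objOwner)
	}
	return nil
}

func main() {
	issuer := owner("alice")
	fmt.Println(checkOwner("alice", &issuer, "node-default")) // <nil>
	fmt.Println(checkOwner("bob", nil, "node-default"))       // error: owner mismatch
}
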
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
index 6593d3ca0..6689557ee 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
}
func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
- traverser, err := placement.NewTraverser(ctx, n.Opts...)
+ traverser, err := placement.NewTraverser(n.Traversal.Opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
@@ -56,10 +56,10 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
}
// perform additional container broadcast if needed
- if n.submitPrimaryPlacementFinish() {
+ if n.Traversal.submitPrimaryPlacementFinish() {
err := n.ForEachNode(ctx, f)
if err != nil {
- n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -79,29 +79,33 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
continue
}
- isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey())
+ workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
item := new(bool)
wg.Add(1)
- go func() {
+ if err := workerPool.Submit(func() {
defer wg.Done()
err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
if err != nil {
resErr.Store(err)
- svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err)
+ svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
- }()
+ }); err != nil {
+ wg.Done()
+ svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err)
+ return true
+ }
// Mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
- n.submitProcessed(addr, item)
+ n.Traversal.submitProcessed(addr, item)
}
wg.Wait()
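
Note the bookkeeping in the hunk above: wg.Add happens before Submit, so a rejected Submit must call wg.Done itself or Wait would hang forever. A runnable sketch of that contract, with a hypothetical pool type:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// pool has the same Submit contract as the worker pool used in
// forEachAddress: Submit may reject the task outright.
type pool struct{ full bool }

func (p pool) Submit(f func()) error {
	if p.full {
		return errors.New("pool is full")
	}
	go f()
	return nil
}

func place(p pool, replicas int) {
	var wg sync.WaitGroup
	for i := 0; i < replicas; i++ {
		wg.Add(1)
		if err := p.Submit(func() {
			defer wg.Done()
			fmt.Println("replica placed")
		}); err != nil {
			// The task never runs, so balance the Add here,
			// exactly as the hunk above does.
			wg.Done()
		}
	}
	wg.Wait()
}

func main() {
	place(pool{}, 3)           // all replicas placed concurrently
	place(pool{full: true}, 3) // submits fail, but Wait still returns
}
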
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
index fff58aca7..f62934bed 100644
--- a/pkg/services/object/common/writer/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -28,7 +28,7 @@ type distributedWriter struct {
resetSuccessAfterOnBroadcast bool
}
-// Traversal parameters and state of container.
+// Traversal holds parameters and state of a container traversal.
type Traversal struct {
Opts []placement.Option
@@ -95,10 +95,6 @@ func (x errIncompletePut) Error() string {
return commonMsg
}
-func (x errIncompletePut) Unwrap() error {
- return x.singleErr
-}
-
// WriteObject implements the transformer.ObjectWriter interface.
func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
t.obj = obj
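
The Unwrap method removed here is what lets errors.Is and errors.As see through errIncompletePut to the underlying cause; without it the wrapper is opaque. A self-contained illustration:

package main

import (
	"errors"
	"fmt"
)

// errIncompletePut sketches the wrapper from distributed.go; with Unwrap
// present, errors.Is and errors.As can reach the underlying cause.
type errIncompletePut struct{ singleErr error }

func (e errIncompletePut) Error() string {
	if e.singleErr != nil {
		return fmt.Sprintf("incomplete object PUT: %v", e.singleErr)
	}
	return "incomplete object PUT"
}

func (e errIncompletePut) Unwrap() error { return e.singleErr }

var errTimeout = errors.New("timeout")

func main() {
	err := errIncompletePut{singleErr: errTimeout}
	fmt.Println(errors.Is(err, errTimeout)) // true only because Unwrap exists
}
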
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 26a53e315..fb0a8e4e5 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -14,7 +14,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -26,10 +25,7 @@ import (
var _ transformer.ObjectWriter = (*ECWriter)(nil)
-var (
- errUnsupportedECObject = errors.New("object is not supported for erasure coding")
- errFailedToSaveAllECParts = errors.New("failed to save all EC parts")
-)
+var errUnsupportedECObject = errors.New("object is not supported for erasure coding")
type ECWriter struct {
Config *Config
@@ -41,12 +37,10 @@ type ECWriter struct {
ObjectMeta object.ContentMeta
ObjectMetaValid bool
-
- remoteRequestSignKey *ecdsa.PrivateKey
}
func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
- relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj)
+ relayed, err := e.relayIfNotContainerNode(ctx, obj)
if err != nil {
return err
}
@@ -66,35 +60,23 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
e.ObjectMetaValid = true
}
- if isContainerNode {
- restoreTokens := e.CommonPrm.ForgetTokens()
- defer restoreTokens()
- // As request executed on container node, so sign request with container key.
- e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil)
- if err != nil {
- return err
- }
- } else {
- e.remoteRequestSignKey = e.Key
- }
-
if obj.ECHeader() != nil {
return e.writeECPart(ctx, obj)
}
return e.writeRawObject(ctx, obj)
}
-func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) {
- currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx)
+func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
+ if e.Relay == nil {
+ return false, nil
+ }
+ currentNodeIsContainerNode, err := e.currentNodeIsContainerNode()
if err != nil {
- return false, false, err
+ return false, err
}
if currentNodeIsContainerNode {
// object can be split or saved locally
- return false, true, nil
- }
- if e.Relay == nil {
- return false, currentNodeIsContainerNode, nil
+ return false, nil
}
objID := object.AddressOf(obj).Object()
var index uint32
@@ -103,13 +85,13 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O
index = obj.ECHeader().Index()
}
if err := e.relayToContainerNode(ctx, objID, index); err != nil {
- return false, false, err
+ return false, err
}
- return true, currentNodeIsContainerNode, nil
+ return true, nil
}
-func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) {
- t, err := placement.NewTraverser(ctx, e.PlacementOpts...)
+func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
+ t, err := placement.NewTraverser(e.PlacementOpts...)
if err != nil {
return false, err
}
@@ -128,7 +110,7 @@ func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error)
}
func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -149,11 +131,21 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
- err = e.Relay(ctx, info, c)
+ completed := make(chan interface{})
+ if poolErr := e.Config.RemotePool.Submit(func() {
+ defer close(completed)
+ err = e.Relay(ctx, info, c)
+ }); poolErr != nil {
+ close(completed)
+ svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr)
+ return poolErr
+ }
+ <-completed
+
if err == nil {
return nil
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+ e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
lastErr = err
}
}
@@ -170,7 +162,7 @@ func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error
return e.writePartLocal(ctx, obj)
}
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
if err != nil {
return err
}
@@ -205,15 +197,14 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
if err != nil {
return err
}
- partsProcessed := make([]atomic.Bool, len(parts))
objID, _ := obj.ID()
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
+ eg, egCtx := errgroup.WithContext(ctx)
for {
- eg, egCtx := errgroup.WithContext(ctx)
nodes := t.Next()
if len(nodes) == 0 {
break
@@ -225,31 +216,17 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
for idx := range parts {
- if !partsProcessed[idx].Load() {
- eg.Go(func() error {
- err := e.writePart(egCtx, parts[idx], idx, nodes, visited)
- if err == nil {
- partsProcessed[idx].Store(true)
- t.SubmitSuccess()
- }
- return err
- })
- }
+ eg.Go(func() error {
+ return e.writePart(egCtx, parts[idx], idx, nodes, visited)
+ })
+ t.SubmitSuccess()
}
- err = eg.Wait()
}
- if err != nil {
+ if err := eg.Wait(); err != nil {
return errIncompletePut{
singleErr: err,
}
}
- for idx := range partsProcessed {
- if !partsProcessed[idx].Load() {
- return errIncompletePut{
- singleErr: errFailedToSaveAllECParts,
- }
- }
- }
return nil
}
@@ -265,10 +242,8 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
err := e.putECPartToNode(ctx, obj, node)
if err == nil {
return nil
- } else if clientSDK.IsErrObjectAlreadyRemoved(err) {
- return err
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
@@ -292,7 +267,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -301,7 +276,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
}
// try to save to any node not yet visited by the current part
- for i := range nodes {
+ for i := range len(nodes) {
select {
case <-ctx.Done():
return ctx.Err()
@@ -316,7 +291,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -333,11 +308,20 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n
}
func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
+ var err error
localTarget := LocalTarget{
- Storage: e.Config.LocalStore,
- Container: e.Container,
+ Storage: e.Config.LocalStore,
}
- return localTarget.WriteObject(ctx, obj, e.ObjectMeta)
+ completed := make(chan interface{})
+ if poolErr := e.Config.LocalPool.Submit(func() {
+ defer close(completed)
+ err = localTarget.WriteObject(ctx, obj, e.ObjectMeta)
+ }); poolErr != nil {
+ close(completed)
+ return poolErr
+ }
+ <-completed
+ return err
}
func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
@@ -345,11 +329,21 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n
client.NodeInfoFromNetmapElement(&clientNodeInfo, node)
remoteTaget := remoteWriter{
- privateKey: e.remoteRequestSignKey,
+ privateKey: e.Key,
clientConstructor: e.Config.ClientConstructor,
commonPrm: e.CommonPrm,
nodeInfo: clientNodeInfo,
}
- return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
+ var err error
+ completed := make(chan interface{})
+ if poolErr := e.Config.RemotePool.Submit(func() {
+ defer close(completed)
+ err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
+ }); poolErr != nil {
+ close(completed)
+ return poolErr
+ }
+ <-completed
+ return err
}
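
The `writePartLocal`/`writePartRemote` bodies above make an asynchronous pool submission look synchronous: the submitted closure closes a channel on completion and the caller blocks on it, which keeps a pool scheduling failure distinguishable from the write error itself. A minimal self-contained sketch of that pattern; `WorkerPool` and `inlinePool` here are illustrative stand-ins, not the repository's `util.WorkerPool`.

```go
package main

import (
	"errors"
	"fmt"
)

// WorkerPool is an assumed stand-in exposing only the Submit method used above.
type WorkerPool interface {
	Submit(func()) error
}

// syncSubmit runs fn on the pool but blocks the caller until fn returns,
// so pool scheduling stays invisible to the calling code. A Submit failure
// (task never scheduled) is reported separately from fn's own error.
func syncSubmit(pool WorkerPool, fn func() error) error {
	var err error
	completed := make(chan struct{})
	if poolErr := pool.Submit(func() {
		defer close(completed)
		err = fn()
	}); poolErr != nil {
		return poolErr
	}
	<-completed
	return err
}

// inlinePool is a trivial pool that runs tasks immediately, for demonstration.
type inlinePool struct{}

func (inlinePool) Submit(f func()) error { f(); return nil }

func main() {
	err := syncSubmit(inlinePool{}, func() error { return errors.New("write failed") })
	fmt.Println(err) // write failed
}
```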
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
deleted file mode 100644
index d5eeddf21..000000000
--- a/pkg/services/object/common/writer/ec_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package writer
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "crypto/sha256"
- "errors"
- "fmt"
- "slices"
- "strconv"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-type testPlacementBuilder struct {
- vectors [][]netmap.NodeInfo
-}
-
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
- [][]netmap.NodeInfo, error,
-) {
- arr := slices.Clone(p.vectors[0])
- return [][]netmap.NodeInfo{arr}, nil
-}
-
-type nmKeys struct{}
-
-func (nmKeys) IsLocalKey(_ []byte) bool {
- return false
-}
-
-type clientConstructor struct {
- vectors [][]netmap.NodeInfo
-}
-
-func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) {
- if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) ||
- bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) {
- return multiAddressClient{err: errors.New("node unavailable")}, nil
- }
- return multiAddressClient{}, nil
-}
-
-type multiAddressClient struct {
- client.MultiAddressClient
- err error
-}
-
-func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) {
- if c.err != nil {
- return nil, c.err
- }
- return &apiclient.ResObjectPutSingle{}, nil
-}
-
-func (c multiAddressClient) ReportError(error) {
-}
-
-func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error {
- return nil
-}
-
-func TestECWriter(t *testing.T) {
- // Create container with policy EC 1.1
- cnr := container.Container{}
- p1 := netmap.PlacementPolicy{}
- p1.SetContainerBackupFactor(1)
- x1 := netmap.ReplicaDescriptor{}
- x1.SetECDataCount(1)
- x1.SetECParityCount(1)
- p1.AddReplicas(x1)
- cnr.SetPlacementPolicy(p1)
- cnr.SetAttribute("cnr", "cnr1")
-
- cid := cidtest.ID()
-
- // Create 4 nodes, 2 nodes for chunks,
- // 2 nodes for the case when the first two will fail.
- ns, _ := testNodeMatrix(t, []int{4})
-
- data := make([]byte, 100)
- _, _ = rand.Read(data)
- ver := version.Current()
-
- var csum checksum.Checksum
- csum.SetSHA256(sha256.Sum256(data))
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- obj := objectSDK.New()
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cid)
- obj.SetVersion(&ver)
- obj.SetPayload(data)
- obj.SetPayloadSize(uint64(len(data)))
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
-
- // Builder returns nodes without sorting by HRW
- builder := &testPlacementBuilder{
- vectors: ns,
- }
-
- ownerKey, err := keys.NewPrivateKey()
- require.NoError(t, err)
- nodeKey, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- log, err := logger.NewLogger(logger.Prm{})
- require.NoError(t, err)
-
- var n nmKeys
- ecw := ECWriter{
- Config: &Config{
- NetmapKeys: n,
- Logger: log,
- ClientConstructor: clientConstructor{vectors: ns},
- KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
- },
- PlacementOpts: append(
- []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},
- placement.WithCopyNumbers(nil)), // copies number ignored for EC
- Container: cnr,
- Key: &ownerKey.PrivateKey,
- Relay: nil,
- ObjectMetaValid: true,
- }
-
- err = ecw.WriteObject(context.Background(), obj)
- require.NoError(t, err)
-}
-
-func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
- mNodes := make([][]netmap.NodeInfo, len(dim))
- mAddr := make([][]string, len(dim))
-
- for i := range dim {
- ns := make([]netmap.NodeInfo, dim[i])
- as := make([]string, dim[i])
-
- for j := range dim[i] {
- a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
- strconv.Itoa(i),
- strconv.Itoa(60000+j),
- )
-
- var ni netmap.NodeInfo
- ni.SetNetworkEndpoints(a)
- ni.SetPublicKey([]byte(a))
-
- var na network.AddressGroup
-
- err := na.FromIterator(netmapcore.Node(ni))
- require.NoError(t, err)
-
- as[j] = network.StringifyGroup(na)
-
- ns[j] = ni
- }
-
- mNodes[i] = ns
- mAddr[i] = as
- }
-
- return mNodes, mAddr
-}
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
index cf3d03275..02fd25b9e 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -4,9 +4,7 @@ import (
"context"
"fmt"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -15,7 +13,7 @@ import (
type ObjectStorage interface {
// Put must save passed object
// and return any appeared error.
- Put(context.Context, *objectSDK.Object, bool) error
+ Put(context.Context, *objectSDK.Object) error
// Delete must delete passed objects
// and return any appeared error.
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
@@ -27,15 +25,10 @@ type ObjectStorage interface {
}
type LocalTarget struct {
- Storage ObjectStorage
- Container containerSDK.Container
+ Storage ObjectStorage
}
func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
- if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
- return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
- }
-
switch meta.Type() {
case objectSDK.TypeTombstone:
err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
@@ -51,5 +44,8 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
+ if err := t.Storage.Put(ctx, obj); err != nil {
+ return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
+ }
return nil
}
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
index d3d2b41b4..3d50da988 100644
--- a/pkg/services/object/common/writer/writer.go
+++ b/pkg/services/object/common/writer/writer.go
@@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -23,7 +24,7 @@ type MaxSizeSource interface {
// of physically stored object in system.
//
// Must return 0 if value can not be obtained.
- MaxObjectSize(context.Context) uint64
+ MaxObjectSize() uint64
}
type ClientConstructor interface {
@@ -31,7 +32,7 @@ type ClientConstructor interface {
}
type InnerRing interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
+ InnerRingKeys() ([][]byte, error)
}
type FormatValidatorConfig interface {
@@ -51,6 +52,8 @@ type Config struct {
NetmapSource netmap.Source
+ RemotePool, LocalPool util.WorkerPool
+
NetmapKeys netmap.AnnouncedKeys
FormatValidator *object.FormatValidator
@@ -66,6 +69,12 @@ type Config struct {
type Option func(*Config)
+func WithWorkerPools(remote, local util.WorkerPool) Option {
+ return func(c *Config) {
+ c.RemotePool, c.LocalPool = remote, local
+ }
+}
+
func WithLogger(l *logger.Logger) Option {
return func(c *Config) {
c.Logger = l
@@ -78,6 +87,13 @@ func WithVerifySessionTokenIssuer(v bool) Option {
}
}
+func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) {
+ if c.NetmapKeys.IsLocalKey(pub) {
+ return c.LocalPool, true
+ }
+ return c.RemotePool, false
+}
+
type Params struct {
Config *Config
@@ -134,8 +150,7 @@ func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.Object
nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
if node.Local {
return LocalTarget{
- Storage: prm.Config.LocalStore,
- Container: prm.Container,
+ Storage: prm.Config.LocalStore,
}
}
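
The new `Config.getWorkerPool` picks a pool by whether the target public key belongs to the local node. A hedged sketch of that dispatch with stubbed dependencies; `announcedKeys` and `inlinePool` are assumptions for illustration, reduced to the single method each that the helper touches.

```go
package main

import (
	"bytes"
	"fmt"
)

type workerPool interface{ Submit(func()) error }

type inlinePool struct{}

func (inlinePool) Submit(f func()) error { f(); return nil }

// announcedKeys stands in for the netmap.AnnouncedKeys dependency,
// reduced to the IsLocalKey check that getWorkerPool relies on.
type announcedKeys struct{ local []byte }

func (a announcedKeys) IsLocalKey(pub []byte) bool { return bytes.Equal(pub, a.local) }

type config struct {
	keys       announcedKeys
	remotePool workerPool
	localPool  workerPool
}

// getWorkerPool mirrors the dispatch added in writer.go: the local node's
// key selects the local pool, any other key the remote one.
func (c *config) getWorkerPool(pub []byte) (workerPool, bool) {
	if c.keys.IsLocalKey(pub) {
		return c.localPool, true
	}
	return c.remotePool, false
}

func main() {
	c := &config{
		keys:       announcedKeys{local: []byte("self")},
		remotePool: inlinePool{},
		localPool:  inlinePool{},
	}
	_, isLocal := c.getWorkerPool([]byte("self"))
	fmt.Println(isLocal) // true
}
```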
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 57e33fde7..88454625d 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ServingRequest)
+ exec.log.Debug(logs.ServingRequest)
if err := exec.executeLocal(ctx); err != nil {
- exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
return err
}
- exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
return nil
}
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index a99ba3586..c2f92950f 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -4,13 +4,12 @@ import (
"context"
"errors"
"fmt"
- "slices"
"strconv"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -35,13 +34,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = l.With(
+ exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "DELETE"),
zap.Stringer("address", exec.address()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )
+ )}
}
func (exec *execCtx) isLocal() bool {
@@ -84,16 +83,16 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
exec.splitInfo = errSplitInfo.SplitInfo()
exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
- exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
+ exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
if err := exec.collectMembers(ctx); err != nil {
return err
}
- exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected)
+ exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
return nil
case errors.As(err, &errECInfo):
- exec.log.Debug(ctx, logs.DeleteECObjectReceived)
+ exec.log.Debug(logs.DeleteECObjectReceived)
return nil
}
@@ -109,7 +108,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
func (exec *execCtx) collectMembers(ctx context.Context) error {
if exec.splitInfo == nil {
- exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY)
+ exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
return nil
}
@@ -132,7 +131,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error {
func (exec *execCtx) collectChain(ctx context.Context) error {
var chain []oid.ID
- exec.log.Debug(ctx, logs.DeleteAssemblingChain)
+ exec.log.Debug(logs.DeleteAssemblingChain)
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
@@ -153,7 +152,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error {
}
func (exec *execCtx) collectChildren(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteCollectingChildren)
+ exec.log.Debug(logs.DeleteCollectingChildren)
children, err := exec.svc.header.children(ctx, exec)
if err != nil {
@@ -166,7 +165,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error {
}
func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteSupplementBySplitID)
+ exec.log.Debug(logs.DeleteSupplementBySplitID)
chain, err := exec.svc.searcher.splitMembers(ctx, exec)
if err != nil {
@@ -183,7 +182,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
for i := range members {
for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
if members[i].Equals(incoming[j]) {
- incoming = slices.Delete(incoming, j, j+1)
+ incoming = append(incoming[:j], incoming[j+1:]...)
j--
}
}
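
In `addMembers`, `slices.Delete` is replaced by the pre-Go-1.21 idiom `append(s[:j], s[j+1:]...)`; the `j--` that follows is essential, since the deletion shifts the next element into index `j`. A small standalone illustration, over `int` for brevity:

```go
package main

import "fmt"

// removeDuplicates drops every element of incoming already present in members,
// using the same append-based in-place deletion as addMembers.
func removeDuplicates(members, incoming []int) []int {
	for i := range members {
		for j := 0; j < len(incoming); j++ { // don't use range: the slice shrinks in the body
			if members[i] == incoming[j] {
				incoming = append(incoming[:j], incoming[j+1:]...)
				j-- // re-examine index j, which now holds the next element
			}
		}
	}
	return incoming
}

func main() {
	fmt.Println(removeDuplicates([]int{1, 3}, []int{1, 2, 3, 3, 4})) // [2 4]
}
```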
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 01b2d9b3f..2c3c47f49 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -10,13 +10,13 @@ import (
)
func (exec *execCtx) executeLocal(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure)
+ exec.log.Debug(logs.DeleteFormingTombstoneStructure)
if err := exec.formTombstone(ctx); err != nil {
return err
}
- exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
+ exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
return exec.saveTombstone(ctx)
}
@@ -33,7 +33,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) error {
)
exec.addMembers([]oid.ID{exec.address().Object()})
- exec.log.Debug(ctx, logs.DeleteFormingSplitInfo)
+ exec.log.Debug(logs.DeleteFormingSplitInfo)
if err := exec.formExtendedInfo(ctx); err != nil {
return fmt.Errorf("form extended info: %w", err)
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index 1c4d7d585..0ba21eee3 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -27,11 +27,11 @@ type Option func(*cfg)
type NetworkInfo interface {
netmap.State
- // TombstoneLifetime must return the lifespan of the tombstones
+ // Must return the lifespan of the tombstones
// in the FrostFS epochs.
TombstoneLifetime() (uint64, error)
- // LocalNodeID returns user ID of the local storage node. Result must not be nil.
+ // Returns user ID of the local storage node. Result must not be nil.
// New tombstone objects will have the result as an owner ID if removal is executed w/o a session.
LocalNodeID() user.ID
}
@@ -72,7 +72,7 @@ func New(gs *getsvc.Service,
opts ...Option,
) *Service {
c := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
header: &headSvcWrapper{s: gs},
searcher: &searchSvcWrapper{s: ss},
placer: &putSvcWrapper{s: ps},
@@ -92,6 +92,6 @@ func New(gs *getsvc.Service,
// WithLogger returns option to specify Delete service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))}
}
}
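
The Delete service above reverts from `logger.NewLoggerWrapper` and plain `*logger.Logger` fields to hand-built `&logger.Logger{Logger: ...}` values; the same change appears in `setLogger` in exec.go and in the Get service further down. A hedged sketch of the wrapper shape this implies: a struct embedding `*zap.Logger`, re-wrapped after `With` attaches scoped fields. The type below is inferred from the call sites, not copied from the repository.

```go
package main

import "go.uber.org/zap"

// Logger mirrors the wrapper implied by `&logger.Logger{Logger: ...}`: a struct
// embedding *zap.Logger, so call sites keep using zap methods directly.
type Logger struct {
	*zap.Logger
}

func main() {
	base := zap.NewExample()
	// re-wrap after attaching request-scoped fields, as setLogger does above
	l := &Logger{Logger: base.With(zap.String("request", "DELETE"))}
	l.Debug("serving request")
}
```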
diff --git a/pkg/services/object/delete/v2/service.go b/pkg/services/object/delete/v2/service.go
index 7146f0361..10dcd0e87 100644
--- a/pkg/services/object/delete/v2/service.go
+++ b/pkg/services/object/delete/v2/service.go
@@ -3,8 +3,8 @@ package deletesvc
import (
"context"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Delete operation of Object service v2.
diff --git a/pkg/services/object/delete/v2/util.go b/pkg/services/object/delete/v2/util.go
index c57d4562a..d0db1f543 100644
--- a/pkg/services/object/delete/v2/util.go
+++ b/pkg/services/object/delete/v2/util.go
@@ -4,10 +4,10 @@ import (
"errors"
"fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index e80132489..9f17f1e4c 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -13,7 +13,7 @@ import (
func (r *request) assemble(ctx context.Context) {
if !r.canAssembleComplexObject() {
- r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
+ r.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -35,23 +35,23 @@ func (r *request) assemble(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(ctx, logs.GetTryingToAssembleTheObject)
+ r.log.Debug(logs.GetTryingToAssembleTheObject)
r.prm.common = r.prm.common.WithLocalOnly(false)
assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
- r.log.Debug(ctx, logs.GetAssemblingSplittedObject,
+ r.log.Debug(logs.GetAssemblingSplittedObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted,
+ defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil {
- r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject,
+ r.log.Warn(logs.GetFailedToAssembleSplittedObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
@@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
detachedExecutor.execute(ctx)
- return detachedExecutor.err
+ return detachedExecutor.statusError.err
}
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index 59dd7fd93..a58602bf7 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -12,7 +12,7 @@ import (
func (r *request) assembleEC(ctx context.Context) {
if r.isRaw() {
- r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
+ r.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -34,29 +34,29 @@ func (r *request) assembleEC(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
+ r.log.Debug(logs.GetTryingToAssembleTheECObject)
// initialize epoch number
- ok := r.initEpoch(ctx)
+ ok := r.initEpoch()
if !ok {
return
}
r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
+ assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
- r.log.Debug(ctx, logs.GetAssemblingECObject,
+ r.log.Debug(logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted,
+ defer r.log.Debug(logs.GetAssemblingECObjectCompleted,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) {
- r.log.Warn(ctx, logs.GetFailedToAssembleECObject,
+ r.log.Warn(logs.GetFailedToAssembleECObject,
zap.Error(err),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go
index b24c9417b..ff3f90bf2 100644
--- a/pkg/services/object/get/assembler.go
+++ b/pkg/services/object/get/assembler.go
@@ -2,7 +2,6 @@ package getsvc
import (
"context"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -60,24 +59,53 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS
if previousID == nil && len(childrenIDs) == 0 {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
}
-
if len(childrenIDs) > 0 {
- if a.rng != nil {
- err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer)
- } else {
- err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer)
+ if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil {
+ return nil, err
}
} else {
- if a.rng != nil {
- err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer)
- } else {
- err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer)
+ if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil {
+ return nil, err
}
}
+ return a.parentObject, nil
+}
+
+func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ var sourceObjectIDs []oid.ID
+ sourceObjectID, ok := a.splitInfo.Link()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ sourceObjectID, ok = a.splitInfo.LastPart()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ if len(sourceObjectIDs) == 0 {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ for _, sourceObjectID = range sourceObjectIDs {
+ obj, err := a.getParent(ctx, sourceObjectID, writer)
+ if err == nil {
+ return obj, nil
+ }
+ }
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+}
+
+func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
if err != nil {
return nil, err
}
- return a.parentObject, nil
+ parent := obj.Parent()
+ if parent == nil {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ if err := writer.WriteHeader(ctx, parent); err != nil {
+ return nil, err
+ }
+ return obj, nil
}
func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) {
@@ -162,16 +190,26 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD
}
func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ if a.rng == nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
+ }
+ return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true)
+ }
+
+ if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
return err
}
- return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true)
+ return writer.WriteChunk(ctx, a.parentObject.Payload())
}
func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
+ if a.rng == nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
+ }
}
+
if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil {
return err
}
@@ -181,9 +219,16 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev
return nil
}
-func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error {
+func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error {
+ withRng := len(partRanges) > 0 && a.rng != nil
+
for i := range partIDs {
- _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer)
+ var r *objectSDK.Range
+ if withRng {
+ r = &partRanges[i]
+ }
+
+ _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer)
if err != nil {
return err
}
@@ -192,13 +237,22 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec
}
func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, err := a.buildChain(ctx, prevID)
+ chain, rngs, err := a.buildChain(ctx, prevID)
if err != nil {
return err
}
- slices.Reverse(chain)
- return a.assemblePayloadByObjectIDs(ctx, writer, chain, false)
+ reverseRngs := len(rngs) > 0
+
+ for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 {
+ chain[left], chain[right] = chain[right], chain[left]
+
+ if reverseRngs {
+ rngs[left], rngs[right] = rngs[right], rngs[left]
+ }
+ }
+
+ return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false)
}
func (a *assembler) isChild(obj *objectSDK.Object) bool {
@@ -206,28 +260,63 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool {
return parent == nil || equalAddresses(a.addr, object.AddressOf(parent))
}
-func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) {
+func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
var (
chain []oid.ID
+ rngs []objectSDK.Range
+ from = a.rng.GetOffset()
+ to = from + a.rng.GetLength()
hasPrev = true
)
// fill the chain end-to-start
for hasPrev {
- head, err := a.objGetter.HeadObject(ctx, prevID)
- if err != nil {
- return nil, err
- }
- if !a.isChild(head) {
- return nil, errParentAddressDiffers
+ // this check applies only to "range" requests;
+ // plain `GET` stops when `hasPrev` becomes false
+ if a.rng != nil && a.currentOffset <= from {
+ break
}
- id, _ := head.ID()
- chain = append(chain, id)
+ head, err := a.objGetter.HeadObject(ctx, prevID)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !a.isChild(head) {
+ return nil, nil, errParentAddressDiffers
+ }
+
+ if a.rng != nil {
+ sz := head.PayloadSize()
+
+ a.currentOffset -= sz
+
+ if a.currentOffset < to {
+ off := uint64(0)
+ if from > a.currentOffset {
+ off = from - a.currentOffset
+ sz -= from - a.currentOffset
+ }
+
+ if to < a.currentOffset+off+sz {
+ sz = to - off - a.currentOffset
+ }
+
+ index := len(rngs)
+ rngs = append(rngs, objectSDK.Range{})
+ rngs[index].SetOffset(off)
+ rngs[index].SetLength(sz)
+
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
+ } else {
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
prevID, hasPrev = head.PreviousID()
}
- return chain, nil
+ return chain, rngs, nil
}
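
The range logic folded back into `buildChain` clamps the requested window `[from, to)` against each part while walking the split chain tail-to-head, tracking the absolute offset of the part under examination. Below is the same arithmetic in isolation, over made-up part sizes and a made-up request window:

```go
package main

import "fmt"

type partRange struct{ off, ln uint64 }

// clampRanges walks part sizes from the tail (as buildChain does) and returns,
// still in reverse order, the sub-range of each part overlapping [from, from+length).
func clampRanges(sizesTailFirst []uint64, total, from, length uint64) []partRange {
	to := from + length
	current := total // absolute offset just past the part under examination
	var rngs []partRange
	for _, sz := range sizesTailFirst {
		if current <= from {
			break // everything further toward the head lies before the window
		}
		current -= sz
		if current < to {
			off := uint64(0)
			take := sz
			if from > current {
				off = from - current // window starts inside this part
				take -= off
			}
			if to < current+off+take {
				take = to - current - off // window ends inside this part
			}
			rngs = append(rngs, partRange{off: off, ln: take})
		}
	}
	return rngs
}

func main() {
	// three parts of 10 bytes each; request bytes [12, 25)
	fmt.Println(clampRanges([]uint64{10, 10, 10}, 30, 12, 13))
	// prints [{0 5} {2 8}]: tail part bytes 0..5, then middle part bytes 2..10
}
```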
diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go
deleted file mode 100644
index ff213cb82..000000000
--- a/pkg/services/object/get/assembler_head.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package getsvc
-
-import (
- "context"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- var sourceObjectIDs []oid.ID
- sourceObjectID, ok := a.splitInfo.Link()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- sourceObjectID, ok = a.splitInfo.LastPart()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- if len(sourceObjectIDs) == 0 {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- for _, sourceObjectID = range sourceObjectIDs {
- obj, err := a.getParent(ctx, sourceObjectID, writer)
- if err == nil {
- return obj, nil
- }
- }
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
-}
-
-func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
- if err != nil {
- return nil, err
- }
- parent := obj.Parent()
- if parent == nil {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- if err := writer.WriteHeader(ctx, parent); err != nil {
- return nil, err
- }
- return obj, nil
-}
diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go
deleted file mode 100644
index 780693c40..000000000
--- a/pkg/services/object/get/assembler_range.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package getsvc
-
-import (
- "context"
- "slices"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
- return err
- }
- return writer.WriteChunk(ctx, a.parentObject.Payload())
-}
-
-func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil {
- return err
- }
- if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part
- return err
- }
- return nil
-}
-
-func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error {
- for i := range partIDs {
- _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, rngs, err := a.buildChainRange(ctx, prevID)
- if err != nil {
- return err
- }
-
- slices.Reverse(chain)
- slices.Reverse(rngs)
- return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs)
-}
-
-func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
- var (
- chain []oid.ID
- rngs []objectSDK.Range
- from = a.rng.GetOffset()
- to = from + a.rng.GetLength()
-
- hasPrev = true
- )
-
- // fill the chain end-to-start
- for hasPrev && from < a.currentOffset {
- head, err := a.objGetter.HeadObject(ctx, prevID)
- if err != nil {
- return nil, nil, err
- }
- if !a.isChild(head) {
- return nil, nil, errParentAddressDiffers
- }
-
- nextOffset := a.currentOffset - head.PayloadSize()
- clampedFrom := max(from, nextOffset)
- clampedTo := min(to, a.currentOffset)
- if clampedFrom < clampedTo {
- index := len(rngs)
- rngs = append(rngs, objectSDK.Range{})
- rngs[index].SetOffset(clampedFrom - nextOffset)
- rngs[index].SetLength(clampedTo - clampedFrom)
-
- id, _ := head.ID()
- chain = append(chain, id)
- }
-
- a.currentOffset = nextOffset
- prevID, hasPrev = head.PreviousID()
- }
-
- return chain, rngs, nil
-}
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index e0a7e1da6..dde0d7dad 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -34,6 +34,7 @@ type assemblerec struct {
rng *objectSDK.Range
remoteStorage ecRemoteStorage
localStorage localStorage
+ cs container.Source
log *logger.Logger
head bool
traverserGenerator traverserGenerator
@@ -46,6 +47,7 @@ func newAssemblerEC(
rng *objectSDK.Range,
remoteStorage ecRemoteStorage,
localStorage localStorage,
+ cs container.Source,
log *logger.Logger,
head bool,
tg traverserGenerator,
@@ -57,6 +59,7 @@ func newAssemblerEC(
ecInfo: ecInfo,
remoteStorage: remoteStorage,
localStorage: localStorage,
+ cs: cs,
log: log,
head: head,
traverserGenerator: tg,
@@ -125,7 +128,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter
func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) {
objID := a.addr.Object()
- trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch)
+ trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch)
if err != nil {
return nil, err
}
@@ -155,7 +158,7 @@ func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Travers
parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount)
if err != nil {
- a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err))
+ a.log.Debug(logs.GetUnableToGetAllPartsECObject, zap.Error(err))
}
return parts
}
@@ -229,7 +232,7 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -238,13 +241,15 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
var object *objectSDK.Object
if a.head {
object, err = a.localStorage.Head(ctx, addr, false)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
} else {
object, err = a.localStorage.Get(ctx, addr)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
}
return object
@@ -257,11 +262,11 @@ func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.N
var errECInfo *objectSDK.ECInfoError
_, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true)
if err == nil {
- a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
+ a.log.Error(logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
return nil
}
if !errors.As(err, &errECInfo) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
+ a.log.Warn(logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
return nil
}
result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks))
@@ -275,7 +280,7 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var objID oid.ID
err := objID.ReadFromV2(ch.ID)
if err != nil {
- a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
return nil
}
var addr oid.Address
@@ -284,13 +289,15 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
var object *objectSDK.Object
if a.head {
object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
} else {
object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ if err != nil {
+ a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ return nil
}
}
return object
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index dfb31133c..d22b14192 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -10,25 +10,32 @@ import (
func (r *request) executeOnContainer(ctx context.Context) {
if r.isLocal() {
- r.log.Debug(ctx, logs.GetReturnResultDirectly)
+ r.log.Debug(logs.GetReturnResultDirectly)
return
}
lookupDepth := r.netmapLookupDepth()
- r.log.Debug(ctx, logs.TryingToExecuteInContainer,
+ r.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- ok := r.initEpoch(ctx)
+ ok := r.initEpoch()
if !ok {
return
}
- localStatus := r.status
+ for {
+ if r.processCurrentEpoch(ctx) {
+ break
+ }
+
+ // check the maximum depth has been reached
+ if lookupDepth == 0 {
+ break
+ }
- for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 {
lookupDepth--
// go to the previous epoch
@@ -36,12 +43,12 @@ func (r *request) executeOnContainer(ctx context.Context) {
}
}
-func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
- r.log.Debug(ctx, logs.ProcessEpoch,
+func (r *request) processCurrentEpoch(ctx context.Context) bool {
+ r.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", r.curProcEpoch),
)
- traverser, ok := r.generateTraverser(ctx, r.address())
+ traverser, ok := r.generateTraverser(r.address())
if !ok {
return true
}
@@ -49,16 +56,12 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- if localStatus == statusEC { // possible only for raw == true and local == false
- r.status = statusEC
- } else {
- r.status = statusUndefined
- }
+ r.status = statusUndefined
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
+ r.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
return false
}
@@ -66,7 +69,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
for i := range addrs {
select {
case <-ctx.Done():
- r.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
+ r.log.Debug(logs.InterruptPlacementIterationByContext,
zap.Error(ctx.Err()),
)
@@ -82,7 +85,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
client.NodeInfoFromNetmapElement(&info, addrs[i])
if r.processNode(ctx, info) {
- r.log.Debug(ctx, logs.GetCompletingTheOperation)
+ r.log.Debug(logs.GetCompletingTheOperation)
return true
}
}
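
The rewritten loop in `executeOnContainer` retries placement against progressively older netmaps until the lookup either succeeds or the configured depth runs out. A minimal hedged model of that control flow; `findInEpoch` is an invented stand-in for `processCurrentEpoch`, which in the real code returns `true` on any terminal outcome, not only success.

```go
package main

import "fmt"

// searchWithDepth models executeOnContainer: try the current epoch first,
// then step back one epoch at a time, up to lookupDepth extra attempts.
func searchWithDepth(epoch, lookupDepth uint64, findInEpoch func(uint64) bool) bool {
	for {
		if findInEpoch(epoch) {
			return true
		}
		// check the maximum depth has been reached
		if lookupDepth == 0 {
			return false
		}
		lookupDepth--
		epoch-- // go to the previous epoch
	}
}

func main() {
	// object only present in the netmap of epoch 97; start at 100 with depth 5
	found := searchWithDepth(100, 5, func(e uint64) bool { return e == 97 })
	fmt.Println(found) // true
}
```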
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 3a50308c2..03b7f8bf2 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -87,51 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
exec.execute(ctx)
- return exec.err
+ return exec.statusError.err
}
-func (r *request) execute(ctx context.Context) {
- r.log.Debug(ctx, logs.ServingRequest)
+func (exec *request) execute(ctx context.Context) {
+ exec.log.Debug(logs.ServingRequest)
// perform local operation
- r.executeLocal(ctx)
+ exec.executeLocal(ctx)
- r.analyzeStatus(ctx, true)
+ exec.analyzeStatus(ctx, true)
}
-func (r *request) analyzeStatus(ctx context.Context, execCnr bool) {
+func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
- switch r.status {
+ switch exec.status {
case statusOK:
- r.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
case statusINHUMED:
- r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved)
+ exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
- r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual)
- r.assemble(ctx)
+ exec.log.Debug(logs.GetRequestedObjectIsVirtual)
+ exec.assemble(ctx)
case statusOutOfRange:
- r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds)
+ exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
case statusEC:
- r.log.Debug(ctx, logs.GetRequestedObjectIsEC)
- if r.isRaw() && execCnr {
- r.executeOnContainer(ctx)
- r.analyzeStatus(ctx, false)
+ exec.log.Debug(logs.GetRequestedObjectIsEC)
+ if exec.isRaw() && execCnr {
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
}
- r.assembleEC(ctx)
+ exec.assembleEC(ctx)
default:
- r.log.Debug(ctx, logs.OperationFinishedWithError,
- zap.Error(r.err),
+ exec.log.Debug(logs.OperationFinishedWithError,
+ zap.Error(exec.err),
)
var errAccessDenied *apistatus.ObjectAccessDenied
- if execCnr && errors.As(r.err, &errAccessDenied) {
+ if execCnr && errors.As(exec.err, &errAccessDenied) {
// Local get can't return an access-denied error, so this error was returned by
// a write to the output stream. There is no need to look for the object on other nodes.
return
}
if execCnr {
- r.executeOnContainer(ctx)
- r.analyzeStatus(ctx, false)
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
}
}
}
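
The `analyzeStatus` flow above is guarded by `execCnr`: an inconclusive local result triggers exactly one container-wide pass, and the recursive call passes `false` so the search cannot loop. A toy model of that guard, with statuses reduced to three values for brevity:

```go
package main

import "fmt"

type status int

const (
	statusUndefined status = iota
	statusOK
	statusVirtual
)

// analyze models analyzeStatus above: any inconclusive status falls through to
// the default branch, and the container-wide retry happens at most once because
// the recursive call passes execCnr=false.
func analyze(st status, execCnr bool, onContainer func() status) status {
	switch st {
	case statusOK:
		return st // operation finished successfully
	case statusVirtual:
		return st // would be assembled from parts
	default:
		if execCnr {
			return analyze(onContainer(), false, onContainer)
		}
		return st
	}
}

func main() {
	st := analyze(statusUndefined, true, func() status { return statusOK })
	fmt.Println(st == statusOK) // true
}
```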
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 3efc72065..6827018dc 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -63,7 +63,7 @@ type testClient struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
+func (e testEpochReceiver) Epoch() (uint64, error) {
return uint64(e), nil
}
@@ -79,7 +79,7 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
+func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
opts := make([]placement.Option, 0, 4)
opts = append(opts,
placement.ForContainer(g.c),
@@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.
opts = append(opts, placement.ForObject(*obj))
}
- t, err := placement.NewTraverser(context.Background(), opts...)
+ t, err := placement.NewTraverser(opts...)
return t, &containerCore.Container{
Value: g.c,
}, err
}
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
index 83ef54744..a6882d4a8 100644
--- a/pkg/services/object/get/getrangeec_test.go
+++ b/pkg/services/object/get/getrangeec_test.go
@@ -6,12 +6,12 @@ import (
"fmt"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -28,14 +28,14 @@ type containerStorage struct {
cnt *container.Container
}
-func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) {
+func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) {
coreCnt := coreContainer.Container{
Value: *cs.cnt,
}
return &coreCnt, nil
}
-func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
+func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) {
return nil, nil
}
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index cfabb082f..1cd5e549c 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -31,7 +31,7 @@ func (r *request) executeLocal(ctx context.Context) {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err))
+ r.log.Debug(logs.GetLocalGetFailed, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 78ca5b5e3..163767c43 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -18,9 +18,9 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
defer span.End()
- r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
+ r.log.Debug(logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
- rs, ok := r.getRemoteStorage(ctx, info)
+ rs, ok := r.getRemoteStorage(info)
if !ok {
return true
}
@@ -35,13 +35,13 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
switch {
default:
- r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err))
+ r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err))
if r.status != statusEC {
// for raw requests, continue to collect other parts
r.status = statusUndefined
if errors.As(err, &errAccessDenied) {
r.err = err
- } else if r.err == nil || !errors.As(r.err, &errAccessDenied) {
+ } else {
r.err = new(apistatus.ObjectNotFound)
}
}
diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go
index 2c64244cf..0df67dec9 100644
--- a/pkg/services/object/get/remote_getter.go
+++ b/pkg/services/object/get/remote_getter.go
@@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob
if err != nil {
return nil, err
}
- epoch, err := g.es.Epoch(ctx)
+ epoch, err := g.es.Epoch()
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index 268080486..1a7a43a35 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -47,14 +47,14 @@ func (r *request) setLogger(l *logger.Logger) {
req = "GET_RANGE"
}
- r.log = l.With(
+ r.log = &logger.Logger{Logger: l.With(
zap.String("request", req),
zap.Stringer("address", r.address()),
zap.Bool("raw", r.isRaw()),
zap.Bool("local", r.isLocal()),
zap.Bool("with session", r.prm.common.SessionToken() != nil),
zap.Bool("with bearer", r.prm.common.BearerToken() != nil),
- )
+ )}
}
func (r *request) isLocal() bool {
@@ -116,20 +116,20 @@ func (r *request) netmapLookupDepth() uint64 {
return r.prm.common.NetmapLookupDepth()
}
-func (r *request) initEpoch(ctx context.Context) bool {
+func (r *request) initEpoch() bool {
r.curProcEpoch = r.netmapEpoch()
if r.curProcEpoch > 0 {
return true
}
- e, err := r.epochSource.Epoch(ctx)
+ e, err := r.epochSource.Epoch()
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+ r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
return false
case err == nil:
@@ -138,17 +138,17 @@ func (r *request) initEpoch(ctx context.Context) bool {
}
}
-func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) {
+func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) {
obj := addr.Object()
- t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch)
+ t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch)
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
return nil, false
case err == nil:
@@ -156,13 +156,13 @@ func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*pla
}
}
-func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) {
+func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) {
rs, err := r.remoteStorageConstructor.Get(info)
if err != nil {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient)
+ r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient)
return nil, false
}
@@ -185,7 +185,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
@@ -206,7 +206,7 @@ func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object)
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index a103f5a7f..3413abeb7 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -34,7 +34,7 @@ func New(
result := &Service{
keyStore: ks,
epochSource: es,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
localStorage: &engineLocalStorage{
engine: e,
},
@@ -53,6 +53,6 @@ func New(
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(s *Service) {
- s.log = l
+ s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))}
}
}
diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go
index 664366d1b..9669afdba 100644
--- a/pkg/services/object/get/types.go
+++ b/pkg/services/object/get/types.go
@@ -20,11 +20,11 @@ import (
)
type epochSource interface {
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
type traverserGenerator interface {
- GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
type keyStorage interface {
diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go
index aaa09b891..213455e10 100644
--- a/pkg/services/object/get/v2/errors.go
+++ b/pkg/services/object/get/v2/errors.go
@@ -4,8 +4,8 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- refs "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
)
var (
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index 60fcd7fbf..18194c740 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -7,16 +7,16 @@ import (
"io"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
index a44616fc9..10ecfc4a3 100644
--- a/pkg/services/object/get/v2/get_range_forwarder.go
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -7,15 +7,15 @@ import (
"io"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go
index 308ccd512..e97b60f66 100644
--- a/pkg/services/object/get/v2/get_range_hash.go
+++ b/pkg/services/object/get/v2/get_range_hash.go
@@ -5,15 +5,15 @@ import (
"encoding/hex"
"fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -22,7 +22,7 @@ import (
// GetRangeHash calls internal service and returns v2 response.
func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- forward, err := s.needToForwardGetRangeHashRequest(ctx, req)
+ forward, err := s.needToForwardGetRangeHashRequest(req)
if err != nil {
return nil, err
}
@@ -48,7 +48,7 @@ type getRangeForwardParams struct {
address oid.Address
}
-func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
+func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
if req.GetMetaHeader().GetTTL() <= 1 {
return getRangeForwardParams{}, nil
}
@@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *obj
}
result.address = addr
- cont, err := s.contSource.Get(ctx, addr.Container())
+ cont, err := s.contSource.Get(addr.Container())
if err != nil {
return result, fmt.Errorf("(%T) could not get container: %w", s, err)
}
- epoch, err := s.netmapSource.Epoch(ctx)
+ epoch, err := s.netmapSource.Epoch()
if err != nil {
return result, fmt.Errorf("(%T) could not get epoch: %w", s, err)
}
- nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch)
+ nm, err := s.netmapSource.GetNetMapByEpoch(epoch)
if err != nil {
return result, fmt.Errorf("(%T) could not get netmap: %w", s, err)
}
@@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *obj
builder := placement.NewNetworkMapBuilder(nm)
objectID := addr.Object()
- nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy())
+ nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy())
if err != nil {
return result, fmt.Errorf("(%T) could not build object placement: %w", s, err)
}
@@ -125,14 +125,14 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
var addrGr network.AddressGroup
if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil {
- s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
var extAddr network.AddressGroup
if len(node.ExternalAddresses()) > 0 {
if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil {
- s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ s.log.Warn(logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
continue
}
}
@@ -150,12 +150,12 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
if firstErr == nil {
firstErr = err
}
- s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode,
+ s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromNode,
zap.String("node_public_key", hex.EncodeToString(node.PublicKey())),
zap.Stringer("address", params.address),
zap.Error(err))
}
- s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
+ s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
if firstErr != nil {
return nil, firstErr
}
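Worth spelling out the TTL rule the predicate above encodes: a request arriving with TTL 1 (or less) must be answered by the receiving node itself, while TTL 2 leaves room for exactly one more hop. A minimal sketch, assuming only that semantics (needForward is illustrative, not a repo function):

package main

import "fmt"

// needForward mirrors the check in needToForwardGetRangeHashRequest:
// TTL <= 1 means the node must serve the request locally.
func needForward(ttl uint32) bool {
    return ttl > 1
}

func main() {
    for _, ttl := range []uint32{1, 2, 3} {
        fmt.Printf("ttl=%d forward=%t\n", ttl, needForward(ttl))
    }
}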
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
index 56056398d..5e16008b8 100644
--- a/pkg/services/object/get/v2/head_forwarder.go
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -5,15 +5,15 @@ import (
"crypto/ecdsa"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index 0ec8912fd..edd19b441 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -11,7 +12,6 @@ import (
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
@@ -60,7 +60,7 @@ func NewService(svc *getsvc.Service,
netmapSource: netmapSource,
announcedKeys: announcedKeys,
contSource: contSource,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
}
for i := range opts {
@@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get V2 service"))}
}
}
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
index 0d73bcd4d..ce9a5c767 100644
--- a/pkg/services/object/get/v2/streamer.go
+++ b/pkg/services/object/get/v2/streamer.go
@@ -3,8 +3,8 @@ package getsvc
import (
"context"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
p.SetHeader(objV2.GetHeader())
p.SetSignature(objV2.GetSignature())
- return s.Send(newResponse(p))
+ return s.GetObjectStream.Send(newResponse(p))
}
func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
p := new(objectV2.GetObjectPartChunk)
p.SetChunk(chunk)
- return s.Send(newResponse(p))
+ return s.GetObjectStream.Send(newResponse(p))
}
func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
}
func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
- return s.Send(newRangeResponse(chunk))
+ return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
}
func newRangeResponse(p []byte) *objectV2.GetRangeResponse {
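The switch from the promoted s.Send to the qualified s.GetObjectStream.Send makes the dispatch target explicit when a struct embeds an interface; if the wrapper ever grew its own Send with a different signature, the promoted call would either stop compiling or silently recurse. A self-contained sketch of the idiom (Sender, printSender, and writer are illustrative names, not from the repo):

package main

import "fmt"

type Sender interface {
    Send(msg string) error
}

type printSender struct{}

func (printSender) Send(msg string) error {
    fmt.Println("sent:", msg)
    return nil
}

// writer embeds Sender; qualifying the call as w.Sender.Send keeps the
// dispatch explicit even if writer later gains its own Send method.
type writer struct {
    Sender
}

func (w writer) SendTwice(msg string) error {
    if err := w.Sender.Send(msg); err != nil { // explicit, not the promoted w.Send
        return err
    }
    return w.Sender.Send(msg)
}

func main() {
    _ = writer{Sender: printSender{}}.SendTwice("chunk")
}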
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index e699a3779..852c2aec3 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -3,20 +3,19 @@ package getsvc
import (
"context"
"crypto/sha256"
- "errors"
"hash"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/status"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -183,7 +182,9 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
default:
return nil, errUnknownChechsumType(t)
case refs.SHA256:
- p.SetHashGenerator(sha256.New)
+ p.SetHashGenerator(func() hash.Hash {
+ return sha256.New()
+ })
case refs.TillichZemor:
p.SetHashGenerator(func() hash.Hash {
return tz.New()
@@ -359,20 +360,19 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien
info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
var err error
+
+ defer func() {
+ stop = err == nil
+
+ if stop || firstErr == nil {
+ firstErr = err
+ }
+
+ // would be nice to log otherwise
+ }()
+
res, err = f(ctx, addr, c, key)
- // non-status logic error that could be returned
- // from the SDK client; should not be considered
- // as a connection error
- var siErr *objectSDK.SplitInfoError
- var eiErr *objectSDK.ECInfoError
-
- stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
-
- if stop || firstErr == nil {
- firstErr = err
- }
-
return
})
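Both hash branches above feed SetHashGenerator a func() hash.Hash. The closure is mandatory for tz.New, whose return type is concrete, because Go function values are not covariant; sha256.New already returns hash.Hash, so wrapping it (as the reverted code does) is redundant but harmless. A sketch of the adapter pattern, using fnv as a stand-in for a constructor whose return type is not hash.Hash:

package main

import (
    "crypto/sha256"
    "fmt"
    "hash"
    "hash/fnv"
)

// setHashGenerator stands in for RangeHashPrm.SetHashGenerator.
func setHashGenerator(gen func() hash.Hash) {
    h := gen()
    h.Write([]byte("payload"))
    fmt.Printf("%x\n", h.Sum(nil))
}

func main() {
    // sha256.New is already a func() hash.Hash, so it can be passed directly.
    setHashGenerator(sha256.New)

    // fnv.New64a returns hash.Hash64, a different type, so it must be
    // adapted through a closure that returns the hash.Hash interface.
    setHashGenerator(func() hash.Hash {
        return fnv.New64a()
    })
}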
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index 3e8832640..2c405070d 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -7,11 +7,9 @@ import (
"errors"
"fmt"
"io"
- "strconv"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -33,8 +31,6 @@ type commonPrm struct {
local bool
xHeaders []string
-
- netmapEpoch uint64
}
 // SetClient sets base client for FrostFS API communication.
@@ -77,14 +73,6 @@ func (x *commonPrm) SetXHeaders(hs []string) {
x.xHeaders = hs
}
-func (x *commonPrm) calculateXHeaders() []string {
- hs := x.xHeaders
- if x.netmapEpoch != 0 {
- hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
- }
- return hs
-}
-
type readPrmCommon struct {
commonPrm
}
@@ -92,8 +80,8 @@ type readPrmCommon struct {
 // SetNetmapEpoch sets the epoch number to be used to locate the object.
 //
 // By default, the current epoch on the server will be used.
-func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) {
- x.netmapEpoch = epoch
+func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
+ // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465
}
// GetObjectPrm groups parameters of GetObject operation.
@@ -151,7 +139,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Key = prm.key
@@ -245,7 +233,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error)
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
if err == nil {
@@ -338,7 +326,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Length = prm.ln
@@ -402,7 +390,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
defer span.End()
prmCli := client.PrmObjectPutInit{
- XHeaders: prm.calculateXHeaders(),
+ XHeaders: prm.xHeaders,
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -449,7 +437,7 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro
}
prmCli := client.PrmObjectPutSingle{
- XHeaders: prm.calculateXHeaders(),
+ XHeaders: prm.xHeaders,
BearerToken: prm.tokenBearer,
Session: prm.tokenSession,
Local: true,
@@ -508,7 +496,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
prm.cliPrm.Local = prm.local
prm.cliPrm.Session = prm.tokenSession
prm.cliPrm.BearerToken = prm.tokenBearer
- prm.cliPrm.XHeaders = prm.calculateXHeaders()
+ prm.cliPrm.XHeaders = prm.xHeaders
prm.cliPrm.Key = prm.key
rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm)
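The deleted calculateXHeaders built the outgoing X-header list by appending an epoch key/value pair to the slice held in the struct field; if it is ever reinstated, note that append on a field-backed slice can alias the caller's array. A defensive sketch (the header key is illustrative; the real constant was sessionAPI.XHeaderNetmapEpoch):

package main

import (
    "fmt"
    "strconv"
)

// xHeadersWithEpoch returns hs plus a trailing epoch pair without ever
// mutating the caller's backing array.
func xHeadersWithEpoch(hs []string, epoch uint64) []string {
    if epoch == 0 {
        return hs
    }
    out := make([]string, 0, len(hs)+2)
    out = append(out, hs...)
    return append(out, "__SYSTEM__NETMAP_EPOCH", strconv.FormatUint(epoch, 10))
}

func main() {
    base := []string{"k", "v"}
    fmt.Println(xHeadersWithEpoch(base, 42)) // [k v __SYSTEM__NETMAP_EPOCH 42]
    fmt.Println(base)                        // [k v] — untouched
}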
diff --git a/pkg/services/object/internal/key.go b/pkg/services/object/internal/key.go
index 1e0a7ef90..eba716976 100644
--- a/pkg/services/object/internal/key.go
+++ b/pkg/services/object/internal/key.go
@@ -3,8 +3,8 @@ package internal
import (
"bytes"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
// VerifyResponseKeyV2 checks if response is signed with expected key. Returns client.ErrWrongPublicKey if not.
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 6a6ee0f0f..61aed5003 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -4,9 +4,8 @@ import (
"context"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -35,7 +34,7 @@ type (
}
MetricRegister interface {
- AddRequestDuration(string, time.Duration, bool, string)
+ AddRequestDuration(string, time.Duration, bool)
AddPayloadSize(string, int)
}
)
@@ -52,7 +51,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
if m.enabled {
t := time.Now()
defer func() {
- m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("Get", time.Since(t), err == nil)
}()
err = m.next.Get(req, &getStreamMetric{
ServerStream: stream,
@@ -65,11 +64,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
return
}
-func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
+func (m MetricCollector) Put() (PutObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Put(ctx)
+ stream, err := m.next.Put()
if err != nil {
return nil, err
}
@@ -80,14 +79,14 @@ func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
start: t,
}, nil
}
- return m.next.Put(ctx)
+ return m.next.Put()
}
-func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
+func (m MetricCollector) Patch() (PatchObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Patch(ctx)
+ stream, err := m.next.Patch()
if err != nil {
return nil, err
}
@@ -98,7 +97,7 @@ func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
start: t,
}, nil
}
- return m.next.Patch(ctx)
+ return m.next.Patch()
}
func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
@@ -107,7 +106,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl
res, err := m.next.PutSingle(ctx, request)
- m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil)
if err == nil {
m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload()))
}
@@ -123,7 +122,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest)
res, err := m.next.Head(ctx, request)
- m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("Head", time.Since(t), err == nil)
return res, err
}
@@ -136,7 +135,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream)
err := m.next.Search(req, stream)
- m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("Search", time.Since(t), err == nil)
return err
}
@@ -149,7 +148,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque
res, err := m.next.Delete(ctx, request)
- m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil)
return res, err
}
return m.next.Delete(ctx, request)
@@ -161,7 +160,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR
err := m.next.GetRange(req, stream)
- m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil)
return err
}
@@ -174,7 +173,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa
res, err := m.next.GetRangeHash(ctx, request)
- m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil)
return res, err
}
@@ -210,7 +209,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error
func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
+ s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil)
return res, err
}
@@ -224,7 +223,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e
func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
+ s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil)
return res, err
}
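The interface change drops the IO-tag label, so a MetricRegister implementation now keys durations by method and success only. A minimal sketch of a conforming implementation backed by prometheus/client_golang (the metric names are invented for the sketch; the real registry lives elsewhere in the repo):

package main

import (
    "strconv"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

type promRegister struct {
    durations *prometheus.HistogramVec
    payloads  *prometheus.CounterVec
}

func newPromRegister() *promRegister {
    r := &promRegister{
        durations: prometheus.NewHistogramVec(prometheus.HistogramOpts{
            Name: "object_request_duration_seconds", // illustrative name
        }, []string{"method", "success"}),
        payloads: prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "object_payload_bytes_total", // illustrative name
        }, []string{"method"}),
    }
    prometheus.MustRegister(r.durations, r.payloads)
    return r
}

func (r *promRegister) AddRequestDuration(method string, d time.Duration, success bool) {
    r.durations.WithLabelValues(method, strconv.FormatBool(success)).Observe(d.Seconds())
}

func (r *promRegister) AddPayloadSize(method string, size int) {
    r.payloads.WithLabelValues(method).Add(float64(size))
}

func main() {
    reg := newPromRegister()
    reg.AddRequestDuration("Get", 150*time.Millisecond, true)
    reg.AddPayloadSize("PutSingle", 4096)
}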
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index 5d298bfed..f1082dfff 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -26,9 +26,9 @@ func NewService(cfg *objectwriter.Config,
}
}
 // Patch calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
- nodeKey, err := s.KeyStorage.GetKey(nil)
+ nodeKey, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index ff13b1d3e..85c28cda0 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -7,13 +7,13 @@ import (
"fmt"
"io"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
@@ -57,31 +57,12 @@ func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
return hs
}
-func isLinkObject(hdr *objectV2.HeaderWithSignature) bool {
- split := hdr.GetHeader().GetSplit()
- return len(split.GetChildren()) > 0 && split.GetParent() != nil
-}
-
-func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool {
- return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil
-}
-
func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
hdrWithSig, addr, err := s.readHeader(ctx, req)
if err != nil {
return err
}
- if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular {
- return errors.New("non-regular object can't be patched")
- }
- if isLinkObject(hdrWithSig) {
- return errors.New("linking object can't be patched")
- }
- if isComplexObjectPart(hdrWithSig) {
- return errors.New("complex object parts can't be patched")
- }
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return err
@@ -112,10 +93,11 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- target, err := target.New(ctx, objectwriter.Params{
- Config: s.Config,
- Common: commonPrm,
- Header: objectSDK.NewFromV2(oV2),
+ target, err := target.New(&objectwriter.Params{
+ Config: s.Config,
+ Common: commonPrm,
+ Header: objectSDK.NewFromV2(oV2),
+ SignRequestPrivateKey: s.localNodeKey,
})
if err != nil {
return fmt.Errorf("target creation: %w", err)
@@ -195,12 +177,7 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
patch.FromV2(req.GetBody())
if !s.nonFirstSend {
- err := s.patcher.ApplyHeaderPatch(ctx,
- patcher.ApplyHeaderPatchPrm{
- NewSplitHeader: patch.NewSplitHeader,
- NewAttributes: patch.NewAttributes,
- ReplaceAttributes: patch.ReplaceAttributes,
- })
+ err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes)
if err != nil {
return fmt.Errorf("patch attributes: %w", err)
}
@@ -219,9 +196,6 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
}
func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- if s.patcher == nil {
- return nil, errors.New("uninitialized patch streamer")
- }
patcherResp, err := s.patcher.Close(ctx)
if err != nil {
return nil, err
diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go
index b9416789c..4f3c3ef17 100644
--- a/pkg/services/object/patch/util.go
+++ b/pkg/services/object/patch/util.go
@@ -6,8 +6,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go
index 52a7c102c..0c8f12b45 100644
--- a/pkg/services/object/put/prm.go
+++ b/pkg/services/object/put/prm.go
@@ -2,6 +2,7 @@ package putsvc
import (
"context"
+ "crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -20,6 +21,8 @@ type PutInitPrm struct {
traverseOpts []placement.Option
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+
+ privateKey *ecdsa.PrivateKey
}
type PutChunkPrm struct {
@@ -65,3 +68,11 @@ func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm {
return p
}
+
+func (p *PutInitPrm) WithPrivateKey(v *ecdsa.PrivateKey) *PutInitPrm {
+ if p != nil {
+ p.privateKey = v
+ }
+
+ return p
+}
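WithPrivateKey follows the package's nil-tolerant builder convention: the setter guards the receiver, so a chain on a nil *PutInitPrm is a no-op rather than a panic. A standalone sketch of the pattern (Prm and WithTTL are illustrative):

package main

import "fmt"

type Prm struct {
    ttl uint32
}

// WithTTL mutates only non-nil receivers, so chained calls never panic.
func (p *Prm) WithTTL(v uint32) *Prm {
    if p != nil {
        p.ttl = v
    }
    return p
}

func main() {
    p := new(Prm).WithTTL(3)
    fmt.Println(p.ttl) // 3

    var nilPrm *Prm
    fmt.Println(nilPrm.WithTTL(3) == nil) // true: silently a no-op
}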
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index 7aeb5857d..8cf4f0d62 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -26,7 +27,9 @@ func NewService(ks *objutil.KeyStorage,
opts ...objectwriter.Option,
) *Service {
c := &objectwriter.Config{
- Logger: logger.NewLoggerWrapper(zap.L()),
+ RemotePool: util.NewPseudoWorkerPool(),
+ LocalPool: util.NewPseudoWorkerPool(),
+ Logger: &logger.Logger{Logger: zap.L()},
KeyStorage: ks,
ClientConstructor: cc,
MaxSizeSrc: ms,
@@ -56,8 +59,8 @@ func NewService(ks *objutil.KeyStorage,
}
}
-func (s *Service) Put() (*Streamer, error) {
+func (p *Service) Put() (*Streamer, error) {
return &Streamer{
- Config: s.Config,
+ Config: p.Config,
}, nil
}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 90f473254..9b4163268 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -9,6 +9,11 @@ import (
"hash"
"sync"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
@@ -21,14 +26,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
@@ -86,7 +86,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest
}
func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.validarePutSingleSize(ctx, obj); err != nil {
+ if err := s.validarePutSingleSize(obj); err != nil {
return object.ContentMeta{}, err
}
@@ -97,12 +97,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object)
return s.validatePutSingleObject(ctx, obj)
}
-func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error {
+func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error {
if uint64(len(obj.Payload())) != obj.PayloadSize() {
return target.ErrWrongPayloadSize
}
- maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
+ maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize()
if obj.PayloadSize() > maxAllowedSize {
return target.ErrExceedingMaxSize
}
@@ -153,7 +153,7 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob
func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
localOnly := req.GetMetaHeader().GetTTL() <= 1
- placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly)
+ placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly)
if err != nil {
return err
}
@@ -166,18 +166,18 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
}
func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- iter := s.NewNodeIterator(placement.placementOptions)
+ iter := s.Config.NewNodeIterator(placement.placementOptions)
iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.KeyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
- return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container)
+ return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
})
}
@@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
if err != nil {
return err
}
- key, err := s.KeyStorage.GetKey(nil)
+ key, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return err
}
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.KeyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
@@ -218,14 +218,14 @@ type putSinglePlacement struct {
resetSuccessAfterOnBroadcast bool
}
-func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
+func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
var result putSinglePlacement
cnrID, ok := obj.ContainerID()
if !ok {
return result, errors.New("missing container ID")
}
- cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
+ cnrInfo, err := s.Config.ContainerSource.Get(cnrID)
if err != nil {
return result, fmt.Errorf("could not get container by ID: %w", err)
}
@@ -249,31 +249,31 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
}
result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
- latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
+ latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource)
if err != nil {
return result, fmt.Errorf("could not get latest network map: %w", err)
}
builder := placement.NewNetworkMapBuilder(latestNetmap)
if localOnly {
result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
- builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
+ builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
}
result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
return result, nil
}
func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
- signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container,
+ signer *putSingleRequestSigner, meta object.ContentMeta,
) error {
if nodeDesc.Local {
- return s.saveLocal(ctx, obj, meta, container)
+ return s.saveLocal(ctx, obj, meta)
}
var info client.NodeInfo
client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
- c, err := s.ClientConstructor.Get(info)
+ c, err := s.Config.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
@@ -281,10 +281,9 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
return s.redirectPutSingleRequest(ctx, signer, obj, info, c)
}
-func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
+func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error {
localTarget := &objectwriter.LocalTarget{
- Storage: s.LocalStore,
- Container: container,
+ Storage: s.Config.LocalStore,
}
return localTarget.WriteObject(ctx, obj, meta)
}
@@ -317,11 +316,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
if err != nil {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
- s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
+ s.Config.Logger.Warn(logs.PutSingleRedirectFailure,
zap.Error(err),
zap.Stringer("address", addr),
zap.Stringer("object_id", objID),
zap.Stringer("container_id", cnrID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
@@ -350,12 +350,8 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
err = signature.VerifyServiceMessage(resp)
if err != nil {
err = fmt.Errorf("response verification failed: %w", err)
- return
}
- st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus())
- err = apistatus.ErrFromStatus(st)
-
return
})
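putSingleRequestSigner carries a *sync.Once so the request is signed at most once even when several placement goroutines race to redirect it. A reduced sketch of that idiom (the string mutation stands in for the real signature.SignServiceMessage call):

package main

import (
    "fmt"
    "sync"
)

type onceSigner struct {
    once    sync.Once
    signErr error
}

// sign performs the signing exactly once; every caller observes the
// same error result afterwards.
func (s *onceSigner) sign(req *string) error {
    s.once.Do(func() {
        *req = *req + " [signed]" // stand-in for signing the service message
        fmt.Println("signed once")
    })
    return s.signErr
}

func main() {
    req := "PutSingleRequest"
    s := &onceSigner{}

    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            _ = s.sign(&req)
        }()
    }
    wg.Wait()
    fmt.Println(req) // PutSingleRequest [signed]
}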
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 19768b7fa..f3803d433 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
@@ -14,6 +15,8 @@ type Streamer struct {
*objectwriter.Config
target transformer.ChunkedObjectWriter
+
+ relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
}
var errNotInit = errors.New("stream not initialized")
@@ -26,17 +29,17 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
}
// initialize destination target
- prmTarget := objectwriter.Params{
+ prmTarget := &objectwriter.Params{
Config: p.Config,
Common: prm.common,
Header: prm.hdr,
Container: prm.cnr,
TraverseOpts: prm.traverseOpts,
- Relay: prm.relay,
+ Relay: p.relay,
}
var err error
- p.target, err = target.New(ctx, prmTarget)
+ p.target, err = target.New(prmTarget)
if err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go
index 78d4c711d..db902ae59 100644
--- a/pkg/services/object/put/v2/service.go
+++ b/pkg/services/object/put/v2/service.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Put operation of Object service v2.
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index f0c648187..5bf15b4cd 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -4,6 +4,11 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
@@ -12,11 +17,6 @@ import (
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -56,10 +56,10 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx)
+ maxSz := s.stream.MaxSizeSrc.MaxObjectSize()
s.sizes = &sizes{
- payloadSz: v.GetHeader().GetPayloadLength(),
+ payloadSz: uint64(v.GetHeader().GetPayloadLength()),
}
// check payload size limit overflow
diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go
index 5ec9ebe10..a157a9542 100644
--- a/pkg/services/object/put/v2/util.go
+++ b/pkg/services/object/put/v2/util.go
@@ -1,10 +1,10 @@
package putsvc
import (
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go
deleted file mode 100644
index 01eb1ea8d..000000000
--- a/pkg/services/object/qos.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package object
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-)
-
-var _ ServiceServer = (*qosObjectService)(nil)
-
-type AdjustIOTag interface {
- AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
-}
-
-type qosObjectService struct {
- next ServiceServer
- adj AdjustIOTag
-}
-
-func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer {
- return &qosObjectService{
- next: next,
- adj: adjIOTag,
- }
-}
-
-func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Delete(ctx, req)
-}
-
-func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Get(req, &qosReadStream[*object.GetResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.GetRangeHash(ctx, req)
-}
-
-func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Head(ctx, req)
-}
-
-func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) {
- s, err := q.next.Patch(ctx)
- if err != nil {
- return nil, err
- }
- return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{
- s: s,
- adj: q.adj,
- }, nil
-}
-
-func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) {
- s, err := q.next.Put(ctx)
- if err != nil {
- return nil, err
- }
- return &qosWriteStream[*object.PutRequest, *object.PutResponse]{
- s: s,
- adj: q.adj,
- }, nil
-}
-
-func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.PutSingle(ctx, req)
-}
-
-func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-type qosSend[T any] interface {
- Send(T) error
-}
-
-type qosReadStream[T any] struct {
- sender qosSend[T]
- ctxF func() context.Context
-}
-
-func (g *qosReadStream[T]) Context() context.Context {
- return g.ctxF()
-}
-
-func (g *qosReadStream[T]) Send(resp T) error {
- return g.sender.Send(resp)
-}
-
-type qosVerificationHeader interface {
- GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
- Send(context.Context, TReq) error
- CloseAndRecv(context.Context) (TResp, error)
-}
-
-type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
- s qosSendRecv[TReq, TResp]
- adj AdjustIOTag
-
- ioTag string
- ioTagDefined bool
-}
-
-func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
- if q.ioTagDefined {
- ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
- }
- return q.s.CloseAndRecv(ctx)
-}
-
-func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
- if !q.ioTagDefined {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
- }
- assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
- ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
- return q.s.Send(ctx, req)
-}
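The deleted wrappers lean entirely on frostfs-qos's tagging helpers, which carry an IO tag through context.Context. Assuming ContextWithIOTag and IOTagFromContext behave as the deleted code uses them, the round trip looks like this sketch:

package main

import (
    "context"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
)

func main() {
    ctx := context.Background()

    // Before adjustment no tag is defined.
    if _, ok := tagging.IOTagFromContext(ctx); !ok {
        fmt.Println("no IO tag yet")
    }

    // AdjustIncomingTag would normally derive the tag from the request
    // signature key; here it is attached directly for illustration.
    ctx = tagging.ContextWithIOTag(ctx, "internal")

    tag, ok := tagging.IOTagFromContext(ctx)
    fmt.Println(tag, ok) // internal true
}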
diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go
index bc6ffd160..18b6107cf 100644
--- a/pkg/services/object/remote_reader.go
+++ b/pkg/services/object/remote_reader.go
@@ -2,6 +2,7 @@ package object
import (
"context"
+ "errors"
"fmt"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -34,6 +35,8 @@ type RemoteRequestPrm struct {
const remoteOpTTL = 1
+var ErrNotFound = errors.New("object header not found")
+
// NewRemoteReader creates, initializes and returns new RemoteHeader instance.
func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader {
return &RemoteReader{
diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go
new file mode 100644
index 000000000..95d4c9d93
--- /dev/null
+++ b/pkg/services/object/request_context.go
@@ -0,0 +1,26 @@
+package object
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+type RequestContextKeyT struct{}
+
+var RequestContextKey = RequestContextKeyT{}
+
+// RequestContext is a context passed between middleware handlers.
+type RequestContext struct {
+ Namespace string
+
+ SenderKey []byte
+
+ ContainerOwner user.ID
+
+ Role acl.Role
+
+ SoftAPECheck bool
+
+ BearerToken *bearer.Token
+}
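A typed empty struct key like RequestContextKeyT is the standard way to avoid collisions in context.WithValue. A sketch of how a middleware would stash and a handler recover the RequestContext (the types are redeclared locally so the sketch runs standalone; the handler code is hypothetical):

package main

import (
    "context"
    "fmt"
)

type RequestContextKeyT struct{}

var RequestContextKey = RequestContextKeyT{}

type RequestContext struct {
    Namespace string
    SenderKey []byte
}

func main() {
    // Middleware: attach the populated request context.
    ctx := context.WithValue(context.Background(),
        RequestContextKey, &RequestContext{Namespace: "ns"})

    // Handler: the comma-ok assertion guards against a missing value.
    if reqCtx, ok := ctx.Value(RequestContextKey).(*RequestContext); ok {
        fmt.Println(reqCtx.Namespace)
    }
}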
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index 80c971e8f..d7ba9f843 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -4,8 +4,8 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type ResponseService struct {
@@ -80,8 +80,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo
return r, nil
}
-func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *ResponseService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -109,8 +109,8 @@ func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchR
return r, nil
}
-func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
- stream, err := s.svc.Patch(ctx)
+func (s *ResponseService) Patch() (PatchObjectStream, error) {
+ stream, err := s.svc.Patch()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index 60d469b11..d70574156 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -8,19 +8,18 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
)
func (exec *execCtx) executeOnContainer(ctx context.Context) error {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug(ctx, logs.TryingToExecuteInContainer,
+ exec.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- if err := exec.initEpoch(ctx); err != nil {
+ if err := exec.initEpoch(); err != nil {
return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
}
@@ -44,11 +43,11 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ProcessEpoch,
+ exec.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
- traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch)
+ traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch)
if err != nil {
return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
}
@@ -59,7 +58,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
+ exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
break
}
@@ -72,8 +71,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
- zap.Error(ctx.Err()))
+ exec.log.Debug(logs.InterruptPlacementIterationByContext,
+ zap.String("error", ctx.Err().Error()))
return
default:
}
@@ -82,18 +81,18 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
- exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err))
+ exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
return
}
ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
- exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
- zap.Error(err))
+ exec.log.Debug(logs.SearchRemoteOperationFailed,
+ zap.String("error", err.Error()))
return
}
@@ -102,7 +101,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
err = exec.writeIDList(ids)
mtx.Unlock()
if err != nil {
- exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err))
+ exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
return
}
}(i)
@@ -113,12 +112,3 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
return nil
}
-
-func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) {
- cnrID := exec.containerID()
- cnr, err := exec.svc.containerSource.Get(ctx, cnrID)
- if err != nil {
- return containerSDK.Container{}, err
- }
- return cnr.Value, nil
-}
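processCurrentEpoch fans the query out to every node in the placement vector: one goroutine per address, a WaitGroup to join them, and a mutex serializing writes into the shared ID writer. Stripped of the service plumbing, the shape is (a sketch; queryNode stands in for searchObjects):

package main

import (
    "fmt"
    "sync"
)

func queryNode(addr string) []string {
    return []string{addr + "/obj1", addr + "/obj2"}
}

func main() {
    addrs := []string{"node-a", "node-b", "node-c"}

    var (
        wg   sync.WaitGroup
        mtx  sync.Mutex
        seen []string
    )

    for i := range addrs {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            ids := queryNode(addrs[i])

            // Writes to the shared result set are serialized, mirroring
            // the mtx.Lock()/writeIDList/mtx.Unlock() sequence above.
            mtx.Lock()
            seen = append(seen, ids...)
            mtx.Unlock()
        }(i)
    }
    wg.Wait()

    fmt.Println(len(seen), "ids collected")
}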
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index ced51ecce..4a2c04ecd 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -1,8 +1,6 @@
package searchsvc
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -21,13 +19,13 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = l.With(
+ exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "SEARCH"),
zap.Stringer("container", exec.containerID()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )
+ )}
}
func (exec *execCtx) isLocal() bool {
@@ -50,13 +48,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
return exec.prm.common.NetmapLookupDepth()
}
-func (exec *execCtx) initEpoch(ctx context.Context) error {
+func (exec *execCtx) initEpoch() error {
exec.curProcEpoch = exec.netmapEpoch()
if exec.curProcEpoch > 0 {
return nil
}
- e, err := exec.svc.currentEpochReceiver.Epoch(ctx)
+ e, err := exec.svc.currentEpochReceiver.Epoch()
if err != nil {
return err
}
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index ec65ab06a..cfaed13b8 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -11,7 +11,7 @@ import (
func (exec *execCtx) executeLocal(ctx context.Context) error {
ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
- exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err))
+ exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
return err
}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 76c091f85..4a5c414d5 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -20,26 +20,26 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ServingRequest)
+ exec.log.Debug(logs.ServingRequest)
err := exec.executeLocal(ctx)
- exec.logResult(ctx, err)
+ exec.logResult(err)
if exec.isLocal() {
- exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
+ exec.log.Debug(logs.SearchReturnResultDirectly)
return err
}
err = exec.executeOnContainer(ctx)
- exec.logResult(ctx, err)
+ exec.logResult(err)
return err
}
-func (exec *execCtx) logResult(ctx context.Context, err error) {
+func (exec *execCtx) logResult(err error) {
switch {
default:
- exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
case err == nil:
- exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
}
}
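logResult puts default first in its switch; Go evaluates cases in source order but runs default only when nothing matches, wherever it is written, so calling err.Error() in the default arm is safe. A tiny sketch confirming the behavior:

package main

import (
    "errors"
    "fmt"
)

func describe(err error) string {
    switch {
    default:
        // Reached only when no case matches, i.e. err != nil,
        // so err.Error() here cannot panic.
        return "finished with error: " + err.Error()
    case err == nil:
        return "finished successfully"
    }
}

func main() {
    fmt.Println(describe(nil))                 // finished successfully
    fmt.Println(describe(errors.New("boom"))) // finished with error: boom
}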
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 918ad421f..44abcfe5b 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -6,10 +6,10 @@ import (
"crypto/sha256"
"errors"
"fmt"
- "slices"
"strconv"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -17,7 +17,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -59,7 +58,7 @@ type simpleIDWriter struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
+func (e testEpochReceiver) Epoch() (uint64, error) {
return uint64(e), nil
}
@@ -82,8 +81,8 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
- t, err := placement.NewTraverser(context.Background(),
+func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
+ t, err := placement.NewTraverser(
placement.ForContainer(g.c),
placement.UseBuilder(g.b[epoch]),
placement.WithoutSuccessTracking(),
@@ -91,7 +90,7 @@ func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID
return t, &containerCore.Container{Value: g.c}, err
}
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -104,7 +103,8 @@ func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, o
return nil, errors.New("vectors for address not found")
}
- res := slices.Clone(vs)
+ res := make([][]netmap.NodeInfo, len(vs))
+ copy(res, vs)
return res, nil
}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index 56fe56468..cc388c1b2 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -46,16 +46,14 @@ type cfg struct {
}
traverserGenerator interface {
- GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
currentEpochReceiver interface {
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
keyStore *util.KeyStorage
-
- containerSource container.Source
}
// New creates, initializes and returns utility serving
@@ -65,11 +63,10 @@ func New(e *engine.StorageEngine,
tg *util.TraverserGenerator,
ns netmap.Source,
ks *util.KeyStorage,
- cs container.Source,
opts ...Option,
) *Service {
c := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
clientConstructor: &clientConstructorWrapper{
constructor: cc,
},
@@ -79,7 +76,6 @@ func New(e *engine.StorageEngine,
traverserGenerator: tg,
currentEpochReceiver: ns,
keyStore: ks,
- containerSource: cs,
}
for i := range opts {
@@ -94,6 +90,6 @@ func New(e *engine.StorageEngine,
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))}
}
}
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 0be5345b9..67b6c0d01 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -2,11 +2,9 @@ package searchsvc
import (
"context"
- "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -54,7 +52,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
}
// exclude processed address
- list = slices.Delete(list, i, i+1)
+ list = append(list[:i], list[i+1:]...)
i--
}
@@ -114,13 +112,9 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
}
func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
- cnr, err := exec.getContainer(ctx)
- if err != nil {
- return nil, err
- }
var selectPrm engine.SelectPrm
selectPrm.WithFilters(exec.searchFilters())
- selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr))
+ selectPrm.WithContainerID(exec.containerID())
r, err := e.storage.Select(ctx, selectPrm)
if err != nil {
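
The append-based deletion in uniqueIDWriter above is the classic in-place replacement for slices.Delete(list, i, i+1); the index is decremented so the element shifted into position i is examined on the next pass. A standalone sketch of the pattern:

    package main

    import "fmt"

    func dedup(list []string) []string {
        seen := make(map[string]struct{})
        for i := 0; i < len(list); i++ {
            if _, ok := seen[list[i]]; ok {
                // In-place delete: shift the tail left over index i,
                // then re-check the same index on the next iteration.
                list = append(list[:i], list[i+1:]...)
                i--
                continue
            }
            seen[list[i]] = struct{}{}
        }
        return list
    }

    func main() {
        fmt.Println(dedup([]string{"a", "b", "b", "c", "a"})) // [a b c]
    }
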
diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go
index 7bb6e4d3c..5a2e9b936 100644
--- a/pkg/services/object/search/v2/request_forwarder.go
+++ b/pkg/services/object/search/v2/request_forwarder.go
@@ -8,14 +8,14 @@ import (
"io"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/search/v2/service.go b/pkg/services/object/search/v2/service.go
index 856cd9f04..78b72ac79 100644
--- a/pkg/services/object/search/v2/service.go
+++ b/pkg/services/object/search/v2/service.go
@@ -1,10 +1,10 @@
package searchsvc
import (
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Search operation of Object service v2.
diff --git a/pkg/services/object/search/v2/streamer.go b/pkg/services/object/search/v2/streamer.go
index 93b281343..15e2d53d5 100644
--- a/pkg/services/object/search/v2/streamer.go
+++ b/pkg/services/object/search/v2/streamer.go
@@ -1,9 +1,9 @@
package searchsvc
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go
index 48ae98958..e971fa8e5 100644
--- a/pkg/services/object/search/v2/util.go
+++ b/pkg/services/object/search/v2/util.go
@@ -5,12 +5,12 @@ import (
"errors"
"fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index e65293977..da98ce245 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -3,8 +3,8 @@ package object
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// GetObjectStream is an interface of FrostFS API v2 compatible object streamer.
@@ -41,8 +41,8 @@ type PatchObjectStream interface {
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
- Put(context.Context) (PutObjectStream, error)
- Patch(context.Context) (PatchObjectStream, error)
+ Put() (PutObjectStream, error)
+ Patch() (PatchObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index fd8e926dd..35367aafe 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -5,11 +5,13 @@ import (
"crypto/ecdsa"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type SignService struct {
+ key *ecdsa.PrivateKey
+
sigSvc *util.SignService
svc ServiceServer
@@ -46,6 +48,7 @@ type getRangeStreamSigner struct {
func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService {
return &SignService{
+ key: key,
sigSvc: util.NewUnarySignService(key),
svc: svc,
}
@@ -96,16 +99,15 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- err = fmt.Errorf("could not close stream and receive response: %w", err)
- resp = new(object.PutResponse)
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *SignService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -133,16 +135,15 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- err = fmt.Errorf("could not close stream and receive response: %w", err)
- resp = new(object.PatchResponse)
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
- stream, err := s.svc.Patch(ctx)
+func (s *SignService) Patch() (PatchObjectStream, error) {
+ stream, err := s.svc.Patch()
if err != nil {
 		return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
}
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index b446d3605..e560d6d8c 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -4,8 +4,8 @@ import (
"bytes"
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -87,12 +87,12 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
})
}
-func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
- return c.next.Put(ctx)
+func (c TransportSplitter) Put() (PutObjectStream, error) {
+ return c.next.Put()
}
-func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) {
- return c.next.Patch(ctx)
+func (c TransportSplitter) Patch() (PatchObjectStream, error) {
+ return c.next.Patch()
}
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
@@ -162,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
var newResp *object.SearchResponse
- for {
+ for ln := uint64(len(ids)); ; {
if newResp == nil {
newResp = new(object.SearchResponse)
newResp.SetBody(body)
}
- cut := min(s.addrAmount, uint64(len(ids)))
+ cut := min(s.addrAmount, ln)
body.SetIDList(ids[:cut])
newResp.SetMetaHeader(resp.GetMetaHeader())
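
The Send loop above caps each SearchResponse at addrAmount object IDs and resends the remainder in follow-up messages. A minimal sketch of the same chunking shape, with a stand-in send function (the builtin min over uint64 needs Go 1.21+):

    package main

    import "fmt"

    // sendChunks splits ids into batches of at most maxPerMsg entries,
    // the same shape as the splitter's Send loop; send is a stand-in.
    func sendChunks(ids []int, maxPerMsg uint64, send func([]int) error) error {
        for ln := uint64(len(ids)); ln > 0; ln = uint64(len(ids)) {
            cut := min(maxPerMsg, ln)
            if err := send(ids[:cut]); err != nil {
                return err
            }
            ids = ids[cut:]
        }
        return nil
    }

    func main() {
        _ = sendChunks([]int{1, 2, 3, 4, 5}, 2, func(batch []int) error {
            fmt.Println(batch) // [1 2], [3 4], [5]
            return nil
        })
    }
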
diff --git a/pkg/services/object/util/key_test.go b/pkg/services/object/util/key_test.go
index 1753a26f7..cb7ddfde5 100644
--- a/pkg/services/object/util/key_test.go
+++ b/pkg/services/object/util/key_test.go
@@ -5,10 +5,10 @@ import (
"crypto/elliptic"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
tokenStorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index b10826226..92beedaa7 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,8 +1,6 @@
package util
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -10,10 +8,18 @@ import (
)
// LogServiceError writes error message of object service to provided logger.
-func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) {
- l.Error(ctx, logs.UtilObjectServiceError,
+func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
+ l.Error(logs.UtilObjectServiceError,
zap.String("node", network.StringifyGroup(node)),
zap.String("request", req),
- zap.Error(err),
+ zap.String("error", err.Error()),
+ )
+}
+
+// LogWorkerPoolError writes an error message about the object worker pool to the provided logger.
+func LogWorkerPoolError(l *logger.Logger, req string, err error) {
+ l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
+ zap.String("request", req),
+ zap.String("error", err.Error()),
)
}
diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go
index f74b0aab9..1bd39f9ea 100644
--- a/pkg/services/object/util/placement.go
+++ b/pkg/services/object/util/placement.go
@@ -1,9 +1,7 @@
package util
import (
- "context"
"fmt"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -45,8 +43,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu
}
}
-func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
+func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -78,8 +76,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac
}
}
-func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
+func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -94,7 +92,7 @@ func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *o
}
if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
- vs[i] = slices.Delete(vs[i], j, j+1)
+ vs[i] = append(vs[i][:j], vs[i][j+1:]...)
j--
}
}
@@ -124,15 +122,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav
// GenerateTraverser generates placement Traverser for provided object address
// using epoch-th network map.
-func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
+func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
// get network map by epoch
- nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch)
+ nm, err := g.netMapSrc.GetNetMapByEpoch(epoch)
if err != nil {
return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
}
// get container related container
- cnr, err := g.cnrSrc.Get(ctx, idCnr)
+ cnr, err := g.cnrSrc.Get(idCnr)
if err != nil {
return nil, nil, fmt.Errorf("could not get container: %w", err)
}
@@ -162,7 +160,7 @@ func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID
)
}
- t, err := placement.NewTraverser(ctx, traverseOpts...)
+ t, err := placement.NewTraverser(traverseOpts...)
if err != nil {
return nil, nil, err
}
diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go
index 34d8ec704..022b9fe5b 100644
--- a/pkg/services/object/util/prm.go
+++ b/pkg/services/object/util/prm.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strconv"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
@@ -100,18 +100,11 @@ func (p *CommonPrm) SetNetmapLookupDepth(v uint64) {
// ForgetTokens forgets all the tokens read from the request's
// meta information before.
-func (p *CommonPrm) ForgetTokens() func() {
+func (p *CommonPrm) ForgetTokens() {
if p != nil {
- tk := p.token
- br := p.bearer
p.token = nil
p.bearer = nil
- return func() {
- p.token = tk
- p.bearer = br
- }
}
- return func() {}
}
func CommonPrmFromV2(req interface {
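
For context on the ForgetTokens change above: the removed variant captured the current token and bearer and returned a closure restoring them, turning the reset into a scoped operation; the reverted version clears them for good. A sketch of the dropped save-and-restore pattern, with an illustrative type:

    package main

    import "fmt"

    type prm struct{ token, bearer string }

    // forget clears the tokens and returns a closure that restores them --
    // the pattern the diff removes in favour of a plain, irreversible reset.
    func (p *prm) forget() func() {
        tk, br := p.token, p.bearer
        p.token, p.bearer = "", ""
        return func() { p.token, p.bearer = tk, br }
    }

    func main() {
        p := &prm{token: "t", bearer: "b"}
        restore := p.forget()
        fmt.Println(*p) // { }
        restore()
        fmt.Println(*p) // {t b}
    }
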
diff --git a/pkg/services/object_manager/placement/cache.go b/pkg/services/object_manager/placement/cache.go
index 2a8460ca5..217261877 100644
--- a/pkg/services/object_manager/placement/cache.go
+++ b/pkg/services/object_manager/placement/cache.go
@@ -3,7 +3,6 @@ package placement
import (
"crypto/sha256"
"fmt"
- "slices"
"sync"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -45,7 +44,7 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p
raw, ok := c.containerCache.Get(cnr)
c.mtx.Unlock()
if ok {
- return c.cloneResult(raw), nil
+ return raw, nil
}
} else {
c.lastEpoch = nm.Epoch()
@@ -66,13 +65,5 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p
c.containerCache.Add(cnr, cn)
}
c.mtx.Unlock()
- return c.cloneResult(cn), nil
-}
-
-func (c *ContainerNodesCache) cloneResult(nodes [][]netmapSDK.NodeInfo) [][]netmapSDK.NodeInfo {
- result := make([][]netmapSDK.NodeInfo, len(nodes))
- for repIdx := range nodes {
- result[repIdx] = slices.Clone(nodes[repIdx])
- }
- return result
+ return cn, nil
}
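
With the cloning removed above, ContainerNodes hands out the cached [][]netmap.NodeInfo directly, so callers must treat the result as read-only; an in-place filter like the remotePlacement deletion loop would otherwise corrupt the cache for later callers. A minimal sketch of the aliasing hazard, with illustrative types:

    package main

    import "fmt"

    func main() {
        cached := [][]string{{"n1", "n2"}, {"n3"}}

        // Returning the cached value directly aliases the cache's memory.
        got := cached

        // A caller that filters in place (as remotePlacement does with
        // append-based deletion) now mutates the cached copy too.
        got[0] = append(got[0][:0], got[0][1:]...)

        fmt.Println(cached[0]) // [n2] -- the cache changed through the alias
    }
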
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
index 7242970b5..a890d5357 100644
--- a/pkg/services/object_manager/placement/cache_test.go
+++ b/pkg/services/object_manager/placement/cache_test.go
@@ -85,10 +85,7 @@ func TestContainerNodesCache(t *testing.T) {
})
t.Run("the error is propagated", func(t *testing.T) {
var pp netmapSDK.PlacementPolicy
- r := netmapSDK.ReplicaDescriptor{}
- r.SetNumberOfObjects(1)
- r.SetSelectorName("Missing")
- pp.AddReplicas(r)
+ require.NoError(t, pp.DecodeString("REP 1 SELECT 1 FROM X FILTER ATTR EQ 42 AS X"))
c := placement.NewContainerNodesCache(size)
_, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go
deleted file mode 100644
index 0f24a9d96..000000000
--- a/pkg/services/object_manager/placement/metrics.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package placement
-
-import (
- "errors"
- "fmt"
- "maps"
- "math"
- "strings"
- "sync"
- "sync/atomic"
-
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-const (
- attrPrefix = "$attribute:"
-
- geoDistance = "$geoDistance"
-)
-
-type Metric interface {
- CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int
-}
-
-type metricsParser struct {
- locodeDBPath string
- locodes map[string]locodedb.Point
-}
-
-type MetricParser interface {
- ParseMetrics([]string) ([]Metric, error)
-}
-
-func NewMetricsParser(locodeDBPath string) (MetricParser, error) {
- return &metricsParser{
- locodeDBPath: locodeDBPath,
- }, nil
-}
-
-func (p *metricsParser) initLocodes() error {
- if len(p.locodes) != 0 {
- return nil
- }
- if len(p.locodeDBPath) > 0 {
- p.locodes = make(map[string]locodedb.Point)
- locodeDB := locodebolt.New(locodebolt.Prm{
- Path: p.locodeDBPath,
- },
- locodebolt.ReadOnly(),
- )
- err := locodeDB.Open()
- if err != nil {
- return err
- }
- defer locodeDB.Close()
- err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) {
- p.locodes[k] = v
- })
- if err != nil {
- return err
- }
- return nil
- }
- return errors.New("set path to locode database")
-}
-
-func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) {
- var metrics []Metric
- for _, raw := range priority {
- if attr, found := strings.CutPrefix(raw, attrPrefix); found {
- metrics = append(metrics, NewAttributeMetric(attr))
- } else if raw == geoDistance {
- err := p.initLocodes()
- if err != nil {
- return nil, err
- }
- if len(p.locodes) == 0 {
- return nil, fmt.Errorf("provide locodes database for metric %s", raw)
- }
- m := NewGeoDistanceMetric(p.locodes)
- metrics = append(metrics, m)
- } else {
- return nil, fmt.Errorf("unsupported priority metric %s", raw)
- }
- }
- return metrics, nil
-}
-
-// attributeMetric describes priority metric based on attribute.
-type attributeMetric struct {
- attribute string
-}
-
-// CalculateValue return [0] if from and to contains attribute attributeMetric.attribute and
-// the value of attribute is the same. In other case return [1].
-func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
- fromAttr := from.Attribute(am.attribute)
- toAttr := to.Attribute(am.attribute)
- if len(fromAttr) > 0 && len(toAttr) > 0 && fromAttr == toAttr {
- return 0
- }
- return 1
-}
-
-func NewAttributeMetric(attr string) Metric {
- return &attributeMetric{attribute: attr}
-}
-
-// geoDistanceMetric describes priority metric based on attribute.
-type geoDistanceMetric struct {
- locodes map[string]locodedb.Point
- distance *atomic.Pointer[map[string]int]
- mtx sync.Mutex
-}
-
-func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
- d := atomic.Pointer[map[string]int]{}
- m := make(map[string]int)
- d.Store(&m)
- gm := &geoDistanceMetric{
- locodes: locodes,
- distance: &d,
- }
- return gm
-}
-
-// CalculateValue return distance in kilometers between current node and provided,
-// if coordinates for provided node found. In other case return math.MaxInt.
-func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
- fl := from.LOCODE()
- tl := to.LOCODE()
- if fl == tl {
- return 0
- }
- m := gm.distance.Load()
- if v, ok := (*m)[fl+tl]; ok {
- return v
- }
- return gm.calculateDistance(fl, tl)
-}
-
-func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
- gm.mtx.Lock()
- defer gm.mtx.Unlock()
- od := gm.distance.Load()
- if v, ok := (*od)[from+to]; ok {
- return v
- }
- nd := maps.Clone(*od)
- var dist int
- pointFrom, okFrom := gm.locodes[from]
- pointTo, okTo := gm.locodes[to]
- if okFrom && okTo {
- dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
- } else {
- dist = math.MaxInt
- }
- nd[from+to] = dist
- gm.distance.Store(&nd)
-
- return dist
-}
-
-// distance return amount of KM between two points.
-// Parameters are latitude and longitude of point 1 and 2 in decimal degrees.
-// Original implementation can be found here https://www.geodatasource.com/developers/go.
-func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
- radLat1 := math.Pi * lt1 / 180
- radLat2 := math.Pi * lt2 / 180
- radTheta := math.Pi * (ln1 - ln2) / 180
-
- dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
-
- if dist > 1 {
- dist = 1
- }
-
- dist = math.Acos(dist)
- dist = dist * 180 / math.Pi
- dist = dist * 60 * 1.1515 * 1.609344
-
- return dist
-}
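
The deleted distance helper above implements the spherical law of cosines: for latitudes lat1, lat2 and longitude difference dLon (in radians), the central angle is acos(sin lat1 * sin lat2 + cos lat1 * cos lat2 * cos dLon), converted to kilometres as degrees * 60 * 1.1515 * 1.609344 (arc-minutes to statute miles to kilometres). A standalone re-check against a known city pair (coordinates approximate):

    package main

    import (
        "fmt"
        "math"
    )

    // greatCircleKM mirrors the deleted helper: spherical law of cosines,
    // with the degree-to-km conversion 60 * 1.1515 * 1.609344.
    func greatCircleKM(lat1, lon1, lat2, lon2 float64) float64 {
        r1 := math.Pi * lat1 / 180
        r2 := math.Pi * lat2 / 180
        theta := math.Pi * (lon1 - lon2) / 180
        c := math.Sin(r1)*math.Sin(r2) + math.Cos(r1)*math.Cos(r2)*math.Cos(theta)
        c = math.Min(c, 1) // clamp rounding noise before Acos
        return math.Acos(c) * 180 / math.Pi * 60 * 1.1515 * 1.609344
    }

    func main() {
        // Moscow (55.75, 37.62) to Saint Petersburg (59.94, 30.31): ~636 km.
        fmt.Printf("%.0f km\n", greatCircleKM(55.75, 37.62, 59.94, 30.31))
    }
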
diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go
index b3f8d9c03..1782e27ea 100644
--- a/pkg/services/object_manager/placement/netmap.go
+++ b/pkg/services/object_manager/placement/netmap.go
@@ -1,7 +1,6 @@
package placement
import (
- "context"
"crypto/sha256"
"fmt"
@@ -36,12 +35,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
}
}
-func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) {
+func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) {
return s.nm, nil
}
-func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc)
+func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
if err != nil {
return nil, fmt.Errorf("could not get network map: %w", err)
}
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index a3f9af959..4e790628f 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -1,10 +1,8 @@
package placement
import (
- "context"
"errors"
"fmt"
- "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -22,12 +20,7 @@ type Builder interface {
//
// Must return all container nodes if object identifier
// is nil.
- BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-}
-
-type NodeState interface {
- // LocalNodeInfo return current node state in FrostFS API v2 NodeInfo structure.
- LocalNodeInfo() *netmap.NodeInfo
+ BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
}
// Option represents placement traverser option.
@@ -57,10 +50,6 @@ type cfg struct {
policy netmap.PlacementPolicy
builder Builder
-
- metrics []Metric
-
- nodeState NodeState
}
const invalidOptsMsg = "invalid traverser options"
@@ -79,7 +68,7 @@ func defaultCfg() *cfg {
}
// NewTraverser creates, initializes with options and returns Traverser instance.
-func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
+func NewTraverser(opts ...Option) (*Traverser, error) {
cfg := defaultCfg()
for i := range opts {
@@ -99,7 +88,7 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy)
}
- ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy)
+ ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy)
if err != nil {
return nil, fmt.Errorf("could not build placement: %w", err)
}
@@ -110,20 +99,7 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
}
var rem []int
- if len(cfg.metrics) > 0 && cfg.nodeState != nil {
- rem = defaultCopiesVector(cfg.policy)
- var unsortedVector []netmap.NodeInfo
- var regularVector []netmap.NodeInfo
- for i := range rem {
- pivot := min(len(ns[i]), rem[i])
- unsortedVector = append(unsortedVector, ns[i][:pivot]...)
- regularVector = append(regularVector, ns[i][pivot:]...)
- }
- rem = []int{-1, -1}
-
- sortedVector := sortVector(cfg, unsortedVector)
- ns = [][]netmap.NodeInfo{sortedVector, regularVector}
- } else if cfg.flatSuccess != nil {
+ if cfg.flatSuccess != nil {
ns = flatNodes(ns)
rem = []int{int(*cfg.flatSuccess)}
} else {
@@ -181,35 +157,6 @@ func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return [][]netmap.NodeInfo{flat}
}
-type nodeMetrics struct {
- index int
- metrics []int
-}
-
-func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo {
- nm := make([]nodeMetrics, len(unsortedVector))
- node := cfg.nodeState.LocalNodeInfo()
-
- for i := range unsortedVector {
- m := make([]int, len(cfg.metrics))
- for j, pm := range cfg.metrics {
- m[j] = pm.CalculateValue(node, &unsortedVector[i])
- }
- nm[i] = nodeMetrics{
- index: i,
- metrics: m,
- }
- }
- slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
- return slices.Compare(a.metrics, b.metrics)
- })
- sortedVector := make([]netmap.NodeInfo, len(unsortedVector))
- for i := range unsortedVector {
- sortedVector[i] = unsortedVector[nm[i].index]
- }
- return sortedVector
-}
-
// Node is a descriptor of storage node with information required for intra-container communication.
type Node struct {
addresses network.AddressGroup
@@ -288,8 +235,8 @@ func (t *Traverser) Next() []Node {
func (t *Traverser) skipEmptyVectors() {
for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
- t.vectors = slices.Delete(t.vectors, i, i+1)
- t.rem = slices.Delete(t.rem, i, i+1)
+ t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
+ t.rem = append(t.rem[:i], t.rem[i+1:]...)
i--
} else {
break
@@ -375,17 +322,3 @@ func WithCopyNumbers(v []uint32) Option {
c.copyNumbers = v
}
}
-
-// WithPriorityMetrics use provided priority metrics to sort nodes.
-func WithPriorityMetrics(m []Metric) Option {
- return func(c *cfg) {
- c.metrics = m
- }
-}
-
-// WithNodeState provide state of the current node.
-func WithNodeState(s NodeState) Option {
- return func(c *cfg) {
- c.nodeState = s
- }
-}
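
The deleted sortVector above ordered candidate nodes by comparing per-node metric vectors lexicographically with a stable sort, so ties kept their placement order. A standalone sketch of that pattern (illustrative types; the slices package needs Go 1.21+):

    package main

    import (
        "fmt"
        "slices"
    )

    type scored struct {
        name    string
        metrics []int // lower is better, compared lexicographically
    }

    func main() {
        nodes := []scored{
            {"n0", []int{1, 0}},
            {"n1", []int{0, 1}},
            {"n2", []int{0, 0}},
        }
        // Stable sort keeps the original order of nodes with equal metrics,
        // matching the deleted sortVector's slices.SortStableFunc call.
        slices.SortStableFunc(nodes, func(a, b scored) int {
            return slices.Compare(a.metrics, b.metrics)
        })
        fmt.Println(nodes) // [{n2 [0 0]} {n1 [0 1]} {n0 [1 0]}]
    }
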
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index d1370f21e..b3b57677d 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -1,8 +1,6 @@
package placement
import (
- "context"
- "slices"
"strconv"
"testing"
@@ -19,14 +17,12 @@ type testBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return b.vectors, nil
}
func testNode(v uint32) (n netmap.NodeInfo) {
- ip := "/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v))
- n.SetNetworkEndpoints(ip)
- n.SetPublicKey([]byte(ip))
+ n.SetNetworkEndpoints("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)))
return n
}
@@ -35,7 +31,8 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
vc := make([][]netmap.NodeInfo, 0, len(v))
for i := range v {
- ns := slices.Clone(v[i])
+ ns := make([]netmap.NodeInfo, len(v[i]))
+ copy(ns, v[i])
vc = append(vc, ns)
}
@@ -43,15 +40,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return vc
}
-func testPlacement(ss []int, rs []int) ([][]netmap.NodeInfo, container.Container) {
- return placement(ss, rs, nil)
-}
-
-func testECPlacement(ss []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
- return placement(ss, nil, ec)
-}
-
-func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
+func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
nodes := make([][]netmap.NodeInfo, 0, len(rs))
replicas := make([]netmap.ReplicaDescriptor, 0, len(rs))
num := uint32(0)
@@ -67,12 +56,7 @@ func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.C
nodes = append(nodes, ns)
var rd netmap.ReplicaDescriptor
- if len(rs) > 0 {
- rd.SetNumberOfObjects(uint32(rs[i]))
- } else {
- rd.SetECDataCount(uint32(ec[i][0]))
- rd.SetECParityCount(uint32(ec[i][1]))
- }
+ rd.SetNumberOfObjects(uint32(rs[i]))
replicas = append(replicas, rd)
}
@@ -103,7 +87,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithoutSuccessTracking(),
@@ -132,7 +116,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -150,7 +134,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
err = n.FromIterator(netmapcore.Node(nodes[1][0]))
require.NoError(t, err)
- require.Equal(t, []Node{{addresses: n, key: []byte("/ip4/0.0.0.0/tcp/5")}}, tr.Next())
+ require.Equal(t, []Node{{addresses: n}}, tr.Next())
})
t.Run("put scenario", func(t *testing.T) {
@@ -161,7 +145,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
)
@@ -202,7 +186,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
nodes, cnr := testPlacement(selectors, replicas)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local)
@@ -277,7 +261,7 @@ func TestTraverserRemValues(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithCopyNumbers(testCase.copyNumbers),
@@ -291,363 +275,3 @@ func TestTraverserRemValues(t *testing.T) {
})
}
}
-
-type nodeState struct {
- node *netmap.NodeInfo
-}
-
-func (n *nodeState) LocalNodeInfo() *netmap.NodeInfo {
- return n.node
-}
-
-func TestTraverserPriorityMetrics(t *testing.T) {
- t.Run("one rep one metric", func(t *testing.T) {
- selectors := []int{4}
- replicas := []int{3}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "A")
- // Node_2, PK - ip4/0.0.0.0/tcp/2
- nodes[0][2].SetAttribute("ClusterName", "B")
- // Node_3, PK - ip4/0.0.0.0/tcp/3
- nodes[0][3].SetAttribute("ClusterName", "B")
-
- sdkNode := testNode(5)
- sdkNode.SetAttribute("ClusterName", "B")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 3, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
-
- next = tr.Next()
- // The last node is
- require.Equal(t, 1, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("one rep one metric fewer nodes", func(t *testing.T) {
- selectors := []int{2}
- replicas := []int{3}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "B")
-
- sdkNode := testNode(5)
- sdkNode.SetAttribute("ClusterName", "B")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 A} ]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_1 B}, {Node_0 A} ]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("two reps two metrics", func(t *testing.T) {
- selectors := []int{3, 3}
- replicas := []int{2, 2}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // REPLICA #1
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- nodes[0][0].SetAttribute("UN-LOCODE", "RU LED")
-
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "A")
- nodes[0][1].SetAttribute("UN-LOCODE", "FI HEL")
-
- // Node_2, PK - ip4/0.0.0.0/tcp/2
- nodes[0][2].SetAttribute("ClusterName", "A")
- nodes[0][2].SetAttribute("UN-LOCODE", "RU LED")
-
- // REPLICA #2
- // Node_3 ip4/0.0.0.0/tcp/3
- nodes[1][0].SetAttribute("ClusterName", "B")
- nodes[1][0].SetAttribute("UN-LOCODE", "RU MOW")
-
- // Node_4, PK - ip4/0.0.0.0/tcp/4
- nodes[1][1].SetAttribute("ClusterName", "B")
- nodes[1][1].SetAttribute("UN-LOCODE", "RU DME")
-
- // Node_5, PK - ip4/0.0.0.0/tcp/5
- nodes[1][2].SetAttribute("ClusterName", "B")
- nodes[1][2].SetAttribute("UN-LOCODE", "RU MOW")
-
- sdkNode := testNode(9)
- sdkNode.SetAttribute("ClusterName", "B")
- sdkNode.SetAttribute("UN-LOCODE", "RU DME")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{
- NewAttributeMetric("ClusterName"),
- NewAttributeMetric("UN-LOCODE"),
- }
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Check that nodes in the same cluster and
- // in the same location should be the first in slice.
- // Nodes which are follow criteria but stay outside the replica
- // should be in the next slice.
-
- next := tr.Next()
- require.Equal(t, 4, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
-
- next = tr.Next()
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
-
- sdkNode.SetAttribute("ClusterName", "B")
- sdkNode.SetAttribute("UN-LOCODE", "RU MOW")
-
- nodesCopy = copyVectors(nodes)
-
- tr, err = NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- next = tr.Next()
- require.Equal(t, 4, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
-
- next = tr.Next()
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
-
- sdkNode.SetAttribute("ClusterName", "A")
- sdkNode.SetAttribute("UN-LOCODE", "RU LED")
-
- nodesCopy = copyVectors(nodes)
-
- tr, err = NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- next = tr.Next()
- require.Equal(t, 4, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[2].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[3].PublicKey()))
-
- next = tr.Next()
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("ec container", func(t *testing.T) {
- selectors := []int{4}
- ec := [][]int{{2, 1}}
-
- nodes, cnr := testECPlacement(selectors, ec)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "A")
- // Node_2, PK - ip4/0.0.0.0/tcp/2
- nodes[0][2].SetAttribute("ClusterName", "B")
- // Node_3, PK - ip4/0.0.0.0/tcp/3
- nodes[0][3].SetAttribute("ClusterName", "B")
-
- sdkNode := testNode(5)
- sdkNode.SetAttribute("ClusterName", "B")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 3, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
-
- next = tr.Next()
- // The last node is
- require.Equal(t, 1, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("one rep one geo metric", func(t *testing.T) {
- t.Skip()
- selectors := []int{2}
- replicas := []int{2}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
-
- sdkNode := testNode(2)
- sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
-
- nodesCopy := copyVectors(nodes)
-
- parser, err := NewMetricsParser("/path/to/locode_db")
- require.NoError(t, err)
- m, err := parser.ParseMetrics([]string{geoDistance})
- require.NoError(t, err)
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `$geoDistance` the order will be:
- // [ {Node_0 RU MOW}, {Node_1 RU LED}]
- // With priority metric `$geoDistance` the order should be:
- // [ {Node_1 RU LED}, {Node_0 RU MOW}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-}
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index e5f001d5a..c3c810001 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -4,9 +4,9 @@ import (
"context"
"strconv"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
@@ -57,12 +57,14 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
ts, err := g.tsSource.Tombstone(ctx, a, epoch)
if err != nil {
- log.Warn(ctx,
+ log.Warn(
logs.TombstoneCouldNotGetTheTombstoneTheSource,
zap.Error(err),
)
- } else if ts != nil {
- return g.handleTS(ctx, addrStr, ts, epoch)
+ } else {
+ if ts != nil {
+ return g.handleTS(addrStr, ts, epoch)
+ }
}
 	// requested tombstone not found
@@ -70,12 +72,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
return false
}
-func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
+func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
- if atr.Key() == objectV2.SysAttributeExpEpoch {
+ if atr.Key() == objectV2.SysAttributeExpEpoch || atr.Key() == objectV2.SysAttributeExpEpochNeoFS {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
- g.log.Warn(ctx,
+ g.log.Warn(
logs.TombstoneExpirationParseFailure,
zap.Error(err),
)
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 2147a32fe..9d33e8179 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -3,7 +3,6 @@ package tombstone
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
@@ -24,7 +23,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- log: logger.NewLoggerWrapper(zap.NewNop()),
+ log: &logger.Logger{Logger: zap.NewNop()},
cacheSize: defaultLRUCacheSize,
}
}
@@ -50,7 +49,9 @@ func NewChecker(oo ...Option) *ExpirationChecker {
panicOnNil(cfg.tsSource, "Tombstone source")
cache, err := lru.New[string, uint64](cfg.cacheSize)
- assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize))
+ if err != nil {
+ panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
+ }
return &ExpirationChecker{
cache: cache,
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 975941847..1ff07b05a 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -39,7 +38,9 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
// Panics if any of the provided options does not allow
// constructing a valid tombstone local Source.
func NewSource(p TombstoneSourcePrm) Source {
- assert.False(p.s == nil, "Tombstone source: nil object service")
+ if p.s == nil {
+ panic("Tombstone source: nil object service")
+ }
return Source(p)
}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index dcaaec0b4..bf67ec4d4 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -9,29 +9,18 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
policycore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Policer.ProcessObject", trace.WithAttributes(
- attribute.String("address", objInfo.Address.String()),
- attribute.Bool("is_linking_object", objInfo.IsLinkingObject),
- attribute.Bool("is_ec_part", objInfo.ECInfo != nil),
- attribute.String("type", objInfo.Type.String()),
- ))
- defer span.End()
-
- cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container())
+ cnr, err := p.cnrSrc.Get(objInfo.Address.Container())
if err != nil {
if client.IsErrContainerNotFound(err) {
- existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container())
+ existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container())
if errWasRemoved != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved)
} else if existed {
@@ -48,7 +37,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
policy := cnr.Value.PlacementPolicy()
if policycore.IsECPlacement(policy) {
- return p.processECContainerObject(ctx, objInfo, cnr.Value)
+ return p.processECContainerObject(ctx, objInfo, policy)
}
return p.processRepContainerObject(ctx, objInfo, policy)
}
@@ -56,7 +45,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
idObj := objInfo.Address.Object()
idCnr := objInfo.Address.Container()
- nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy)
+ nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -86,7 +75,7 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc
}
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -110,7 +99,6 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
// Number of copies that are stored on maintenance nodes.
var uncheckedCopies int
- var candidates []netmap.NodeInfo
for i := 0; shortage > 0 && i < len(nodes); i++ {
select {
case <-ctx.Done():
@@ -118,68 +106,71 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
default:
}
- var err error
- st := checkedNodes.processStatus(nodes[i])
- if !st.Processed() {
- st, err = p.checkStatus(ctx, addr, nodes[i])
- checkedNodes.set(nodes[i], st)
- if st == nodeDoesNotHoldObject {
- // 1. This is the first time the node is encountered (`!st.Processed()`).
- // 2. The node does not hold object (`st == nodeDoesNotHoldObject`).
- // So we need to try to put an object to it.
- candidates = append(candidates, nodes[i])
- continue
- }
- }
-
- switch st {
- case nodeIsLocal:
+ if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
requirements.needLocalCopy = true
shortage--
- case nodeIsUnderMaintenance:
- shortage--
- uncheckedCopies++
+ } else if nodes[i].IsMaintenance() {
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+ } else {
+ if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
+ if status == nodeHoldsObject {
+ // node already contains replica, no need to replicate
+ nodes = append(nodes[:i], nodes[i+1:]...)
+ i--
+ shortage--
+ }
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK,
- zap.String("node", netmap.StringifyPublicKey(nodes[i])))
- case nodeHoldsObject:
- shortage--
- case nodeDoesNotHoldObject:
- case nodeStatusUnknown:
- p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
- zap.Stringer("object", addr),
- zap.Error(err))
- default:
- panic("unreachable")
+ continue
+ }
+
+ callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
+
+ _, err := p.remoteHeader(callCtx, nodes[i], addr, false)
+
+ cancel()
+
+ if err == nil {
+ shortage--
+ checkedNodes.submitReplicaHolder(nodes[i])
+ } else {
+ if client.IsErrObjectNotFound(err) {
+ checkedNodes.submitReplicaCandidate(nodes[i])
+ continue
+ } else if client.IsErrNodeUnderMaintenance(err) {
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+ } else {
+ p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
+ zap.Stringer("object", addr),
+ zap.String("error", err.Error()),
+ )
+ }
+ }
}
+
+ nodes = append(nodes[:i], nodes[i+1:]...)
+ i--
}
- p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies)
+ p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
}
-func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) {
- if p.netmapKeys.IsLocalKey(node.PublicKey()) {
- return nodeIsLocal, nil
- }
- if node.Status().IsMaintenance() {
- return nodeIsUnderMaintenance, nil
- }
+// handleMaintenance handles a node in maintenance mode and returns the new shortage and uncheckedCopies values.
+//
+// Remote nodes under maintenance are considered OK: such
+// nodes MAY not respond with the object, and treating them as
+// holders prevents spamming the network with new replicas.
+// However, additional copies should not be removed in this case,
+// because the only remaining copy could be removed this way.
+func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+ checkedNodes.submitReplicaHolder(node)
+ shortage--
+ uncheckedCopies++
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
- _, err := p.remoteHeader(callCtx, node, addr, false)
- cancel()
-
- if err == nil {
- return nodeHoldsObject, nil
- }
- if client.IsErrObjectNotFound(err) {
- return nodeDoesNotHoldObject, nil
- }
- if client.IsErrNodeUnderMaintenance(err) {
- return nodeIsUnderMaintenance, nil
- }
- return nodeStatusUnknown, err
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+ zap.String("node", netmap.StringifyPublicKey(node)),
+ )
+ return shortage, uncheckedCopies
}
func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
@@ -187,7 +178,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
) {
switch {
case shortage > 0:
- p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected,
+ p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
zap.Stringer("object", addr),
zap.Uint32("shortage", shortage),
)
@@ -203,7 +194,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
case uncheckedCopies > 0:
// If we have more copies than needed, but some of them are from the maintenance nodes,
// save the local copy.
- p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
+ p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
zap.Int("count", uncheckedCopies))
case uncheckedCopies == 0:
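
The shortage/uncheckedCopies bookkeeping in processRepNodes above drives both decisions: whether to replicate and whether a redundant local copy may be dropped. Maintenance nodes reduce the shortage (no new replicas are spammed) but are counted as unchecked, which later blocks removal of surplus copies. A minimal model of that accounting (names are illustrative, not the service's real types):

    package main

    import "fmt"

    type status int

    const (
        holds status = iota
        missing
        maintenance
    )

    func main() {
        required := uint32(3)
        shortage := required
        unchecked := 0

        for _, st := range []status{holds, maintenance, missing} {
            switch st {
            case holds:
                shortage--
            case maintenance:
                // Counted as satisfied to avoid replica spam, but remembered
                // as unverified so redundant copies are not removed later.
                shortage--
                unchecked++
            case missing:
                // candidate for replication
            }
        }
        fmt.Println(shortage, unchecked) // 1 1 -> replicate one more copy
    }
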
diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go
index 69879c439..d4c7ccbf9 100644
--- a/pkg/services/policer/check_test.go
+++ b/pkg/services/policer/check_test.go
@@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) {
cache.SubmitSuccessfulReplication(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
- cache.set(node, nodeDoesNotHoldObject)
+ cache.submitReplicaCandidate(node)
require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject)
- cache.set(node, nodeHoldsObject)
+ cache.submitReplicaHolder(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index fbdeb3148..61a65fc21 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -10,7 +10,6 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -28,18 +27,18 @@ type ecChunkProcessResult struct {
var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node")
-func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
+func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
if objInfo.ECInfo == nil {
- return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy())
+ return p.processECContainerRepObject(ctx, objInfo, policy)
}
- return p.processECContainerECObject(ctx, objInfo, cnr)
+ return p.processECContainerECObject(ctx, objInfo, policy)
}
// processECContainerRepObject processes non-erasure-coded objects in an EC container: tombstones, locks and linking objects.
// All of them must be stored on all of the container nodes.
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
objID := objInfo.Address.Object()
- nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy)
+ nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -59,7 +58,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", objInfo.Address),
)
@@ -68,8 +67,8 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
return nil
}
-func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
- nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
+func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
+ nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -86,12 +85,12 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
res := p.processECChunk(ctx, objInfo, nn[0])
if !res.validPlacement {
// drop local chunk only if all required chunks are in place
- res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr)
+ res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0])
}
- p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
+ p.adjustECPlacement(ctx, objInfo, nn[0], policy)
if res.removeLocal {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
p.cbRedundantCopy(ctx, objInfo.Address)
}
return nil
@@ -101,15 +100,15 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
var removeLocalChunk bool
requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
- if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
// current node is required node, we are happy
return ecChunkProcessResult{
validPlacement: true,
}
}
- if requiredNode.Status().IsMaintenance() {
+ if requiredNode.IsMaintenance() {
// treat a node under maintenance as holding the object, but do not drop the local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
return ecChunkProcessResult{}
}
@@ -120,7 +119,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
if err == nil {
removeLocalChunk = true
} else if client.IsErrObjectNotFound(err) {
- p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
+ p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
task := replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -129,9 +128,9 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
} else if client.IsErrNodeUnderMaintenance(err) {
// treat a node under maintenance as holding the object, but do not drop the local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
} else {
- p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
+ p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
}
return ecChunkProcessResult{
@@ -139,20 +138,20 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
}
}
-func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
+func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) bool {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
if len(requiredChunkIndexes) == 0 {
- p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
+ p.log.Info(logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
return true
}
err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if len(requiredChunkIndexes) == 0 {
@@ -170,9 +169,8 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I
addr.SetContainer(objInfo.Address.Container())
addr.SetObject(indexToObjectID[index])
p.replicator.HandlePullTask(ctx, replicator.Task{
- Addr: addr,
- Nodes: candidates,
- Container: cnr,
+ Addr: addr,
+ Nodes: candidates,
})
}
// there were some missing chunks, so it's not ok
@@ -185,7 +183,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec
if uint32(i) == objInfo.ECInfo.Total {
break
}
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
}
}
@@ -210,7 +208,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad
func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
var eiErr *objectSDK.ECInfoError
for _, n := range nodes {
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
continue
}
_, err := p.remoteHeader(ctx, n, parentAddress, true)
@@ -224,11 +222,11 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
var chunkID oid.ID
if err := chunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return false
}
if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
+ p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return false
}
@@ -239,7 +237,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
for index, candidates := range required {
if len(candidates) == 0 {
- p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
+ p.log.Error(logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
return false
}
}
@@ -247,7 +245,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
return true
}
-func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
+func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, policy netmap.PlacementPolicy) {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
@@ -260,7 +258,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
return
}
var err error
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
_, err = p.localHeader(ctx, parentAddress)
} else {
_, err = p.remoteHeader(ctx, n, parentAddress, true)
@@ -271,20 +269,18 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
resolved[ch.Index] = append(resolved[ch.Index], n)
var ecInfoChunkID oid.ID
if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
return
}
if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
+ p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
return
}
chunkIDs[ch.Index] = ecInfoChunkID
}
- } else if client.IsErrObjectAlreadyRemoved(err) {
- restore = false
- } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
- p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
+ } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+ p.log.Warn(logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
p.replicator.HandleReplicationTask(ctx, replicator.Task{
NumCopies: 1,
Addr: objInfo.Address,
@@ -296,23 +292,21 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total {
return
}
- if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() {
+ if objInfo.ECInfo.Total-uint32(len(resolved)) > policy.ReplicaDescriptor(0).GetECParityCount() {
var found []uint32
for i := range resolved {
found = append(found, i)
}
- p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
+ p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
return
}
- p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
+ p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, policy)
}
-func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID,
- cnr containerSDK.Container,
-) {
- c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
+func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, policy netmap.PlacementPolicy) {
+ c, err := erasurecode.NewConstructor(int(policy.ReplicaDescriptor(0).GetECDataCount()), int(policy.ReplicaDescriptor(0).GetECParityCount()))
if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
@@ -321,7 +315,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
key, err := p.keyStorage.GetKey(nil)
if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
required := make([]bool, len(parts))
@@ -331,7 +325,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
}
}
if err := c.ReconstructParts(parts, required, key); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
}
for idx, part := range parts {
@@ -343,11 +337,10 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
pID, _ := part.ID()
addr.SetObject(pID)
targetNode := nodes[idx%len(nodes)]
- if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
p.replicator.HandleLocalPutTask(ctx, replicator.Task{
- Addr: addr,
- Obj: part,
- Container: cnr,
+ Addr: addr,
+ Obj: part,
})
} else {
p.replicator.HandleReplicationTask(ctx, replicator.Task{
@@ -371,7 +364,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
var obj *objectSDK.Object
var err error
for _, node := range nodes {
- if p.netmapKeys.IsLocalKey(node.PublicKey()) {
+ if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
obj, err = p.localObject(egCtx, objID)
} else {
obj, err = p.remoteObject(egCtx, node, objID)
@@ -379,7 +372,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
if err == nil {
break
}
- p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
+ p.log.Warn(logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
}
if obj != nil {
parts[idx] = obj
@@ -388,7 +381,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
})
}
if err := errGroup.Wait(); err != nil {
- p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
+ p.log.Error(logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
return nil
}
return parts
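Across ec.go the containerSDK.Container parameter is narrowed to netmap.PlacementPolicy, the only part of the container these functions read. A sketch of the derivation, using only calls visible in this diff and assuming the first replica descriptor defines the EC scheme:

    rd := policy.ReplicaDescriptor(0)
    c, err := erasurecode.NewConstructor(int(rd.GetECDataCount()), int(rd.GetECParityCount()))
    if err != nil {
        return // an all-zero descriptor does not describe a valid EC scheme
    }
    // c can then rebuild missing parts via c.ReconstructParts(parts, required, key)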
diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go
index c6980536b..e230153f9 100644
--- a/pkg/services/policer/ec_test.go
+++ b/pkg/services/policer/ec_test.go
@@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(chunkAddress.Container()) {
return cnr, nil
}
@@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
@@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(parentAddress.Container()) {
return cnr, nil
}
diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go
index c2157de5d..cd47cb0fc 100644
--- a/pkg/services/policer/nodecache.go
+++ b/pkg/services/policer/nodecache.go
@@ -8,9 +8,6 @@ const (
nodeNotProcessed nodeProcessStatus = iota
nodeDoesNotHoldObject
nodeHoldsObject
- nodeStatusUnknown
- nodeIsUnderMaintenance
- nodeIsLocal
)
func (st nodeProcessStatus) Processed() bool {
@@ -18,19 +15,37 @@ func (st nodeProcessStatus) Processed() bool {
}
// nodeCache tracks Policer's check progress.
-type nodeCache map[uint64]nodeProcessStatus
+type nodeCache map[uint64]bool
func newNodeCache() nodeCache {
- return make(map[uint64]nodeProcessStatus)
+ return make(map[uint64]bool)
}
-func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
+func (n nodeCache) set(node netmap.NodeInfo, val bool) {
n[node.Hash()] = val
}
+// submitReplicaCandidate submits a storage node as a candidate to store the
+// object replica in case of shortage.
+func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
+ n.set(node, false)
+}
+
+// submits storage node as a current object replica holder.
+func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
+ n.set(node, true)
+}
+
// processStatus returns current processing status of the storage node.
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
- return n[node.Hash()]
+ switch val, ok := n[node.Hash()]; {
+ case !ok:
+ return nodeNotProcessed
+ case val:
+ return nodeHoldsObject
+ default:
+ return nodeDoesNotHoldObject
+ }
}
// SubmitSuccessfulReplication marks given storage node as a current object
@@ -38,5 +53,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
//
// SubmitSuccessfulReplication implements replicator.TaskResult.
func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
- n.set(node, nodeHoldsObject)
+ n.submitReplicaHolder(node)
}
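The nodeCache rewrite above collapses the status enum into a boolean map while processStatus preserves the old three-way view. A usage sketch built only from the methods in this file (n1, n2, n3 are netmap.NodeInfo values):

    cache := newNodeCache()
    cache.submitReplicaCandidate(n1) // checked; does not hold the object yet
    cache.submitReplicaHolder(n2)    // confirmed replica holder
    cache.processStatus(n1)          // nodeDoesNotHoldObject
    cache.processStatus(n2)          // nodeHoldsObject
    cache.processStatus(n3)          // nodeNotProcessed: never submitted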
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 5d59604c2..9dbfd8b9f 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -91,7 +91,7 @@ type cfg struct {
func defaultCfg() *cfg {
return &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
batchSize: 10,
cacheSize: 1024, // 1024 * address size = 1024 * 64 = 64 MiB
sleepDuration: 1 * time.Second,
@@ -143,7 +143,7 @@ func WithPlacementBuilder(v placement.Builder) Option {
}
}
-// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer.
+// WithRemoteObjectHeader returns option to set remote object header receiver of Policer.
func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option {
return func(c *cfg) {
c.remoteHeader = v
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index c91e7cc7c..363c0b922 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,13 +1,13 @@
package policer
import (
- "fmt"
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
+ "go.uber.org/zap"
)
type objectsInWork struct {
@@ -55,8 +55,12 @@ func New(opts ...Option) *Policer {
opts[i](c)
}
+ c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
+
cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
- assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize))
+ if err != nil {
+ panic(err)
+ }
return &Policer{
cfg: c,
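New now tags every policer log line with a fixed component field and trades the assert helper for a plain panic. The cache is hashicorp/golang-lru/v2's generic LRU; a sketch of its role as a recently-processed filter, based on the Add call that appears later in this diff (process.go):

    cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
    if err != nil {
        panic(err) // lru.New only fails for a non-positive size
    }
    cache.Add(addr, time.Now()) // remember when addr was last handled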
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index 049c33753..e353ea428 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"errors"
- "slices"
"sort"
"testing"
"time"
@@ -37,10 +36,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
// Container source and bury function
buryCh := make(chan oid.Address)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -79,7 +78,6 @@ func TestProcessObject(t *testing.T) {
maintenanceNodes []int
wantRemoveRedundant bool
wantReplicateTo []int
- headResult map[int]error
ecInfo *objectcore.ECInfo
}{
{
@@ -129,7 +127,7 @@ func TestProcessObject(t *testing.T) {
nodeCount: 2,
policy: `REP 2 REP 2`,
placement: [][]int{{0, 1}, {0, 1}},
- wantReplicateTo: []int{1},
+ wantReplicateTo: []int{1, 1}, // node 1 sits in both replica vectors, so two identical tasks are expected (arguably redundant)
},
{
desc: "lock object must be replicated to all nodes",
@@ -147,14 +145,6 @@ func TestProcessObject(t *testing.T) {
objHolders: []int{1},
maintenanceNodes: []int{2},
},
- {
- desc: "preserve local copy when node response with MAINTENANCE",
- nodeCount: 3,
- policy: `REP 2`,
- placement: [][]int{{1, 2}},
- objHolders: []int{1},
- headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
- },
{
desc: "lock object must be replicated to all EC nodes",
objType: objectSDK.TypeLock,
@@ -171,14 +161,6 @@ func TestProcessObject(t *testing.T) {
placement: [][]int{{0, 1, 2}},
wantReplicateTo: []int{1, 2},
},
- {
- desc: "do not remove local copy when MAINTENANCE status is cached",
- objType: objectSDK.TypeRegular,
- nodeCount: 3,
- policy: `REP 1 REP 1`,
- placement: [][]int{{1, 2}, {1, 0}},
- headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
- },
}
for i := range tests {
@@ -192,7 +174,7 @@ func TestProcessObject(t *testing.T) {
nodes[i].SetPublicKey([]byte{byte(i)})
}
for _, i := range ti.maintenanceNodes {
- nodes[i].SetStatus(netmap.Maintenance)
+ nodes[i].SetMaintenance()
}
var policy netmap.PlacementPolicy
@@ -222,14 +204,11 @@ func TestProcessObject(t *testing.T) {
t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
return nil, errors.New("unexpected object head")
}
- if ti.headResult != nil {
- if err, ok := ti.headResult[index]; ok {
- return nil, err
+ for _, i := range ti.objHolders {
+ if index == i {
+ return nil, nil
}
}
- if slices.Contains(ti.objHolders, index) {
- return nil, nil
- }
return nil, new(apistatus.ObjectNotFound)
}
@@ -238,14 +217,14 @@ func TestProcessObject(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(addr.Container()) {
return cnr, nil
}
t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container())
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -303,10 +282,10 @@ func TestProcessObjectError(t *testing.T) {
cnr := &container.Container{}
cnr.Value.Init()
source := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return nil, new(apistatus.ContainerNotFound)
},
}
@@ -351,10 +330,10 @@ func TestIteratorContract(t *testing.T) {
}
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -443,22 +422,18 @@ func (it *sliceKeySpaceIterator) Rewind() {
}
type containerSrc struct {
- get func(ctx context.Context, id cid.ID) (*container.Container, error)
- deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error)
+ get func(id cid.ID) (*container.Container, error)
+ deletionInfo func(id cid.ID) (*container.DelInfo, error)
}
-func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
- return f.get(ctx, id)
-}
+func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) }
-func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
- return f.deletionInfo(ctx, id)
-}
+func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) }
// placementBuilderFunc is a placement.Builder backed by a function
type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return f(c, o, p)
}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index 635a5683b..a5ebb0010 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -7,20 +7,17 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (p *Policer) Run(ctx context.Context) {
p.shardPolicyWorker(ctx)
- p.log.Info(ctx, logs.PolicerRoutineStopped)
+ p.log.Info(logs.PolicerRoutineStopped)
}
func (p *Policer) shardPolicyWorker(ctx context.Context) {
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
for {
select {
case <-ctx.Done():
@@ -36,7 +33,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
continue
}
- p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
+ p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
}
skipMap := newSkipMap()
@@ -62,9 +59,9 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
if p.objsInWork.add(addr.Address) {
err := p.processObject(ctx, addr)
if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
- p.log.Error(ctx, logs.PolicerUnableToProcessObj,
+ p.log.Error(logs.PolicerUnableToProcessObj,
zap.Stringer("object", addr.Address),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
p.cache.Add(addr.Address, time.Now())
p.objsInWork.remove(addr.Address)
@@ -72,7 +69,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
}
})
if err != nil {
- p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
+ p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
}
}
}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 8c6f0df06..7e5c6e093 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
@@ -26,7 +27,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork,
+ p.log.Debug(logs.ReplicatorFinishWork,
zap.Uint32("amount of unfinished replicas", task.NumCopies),
)
}()
@@ -42,9 +43,10 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
var err error
task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err))
+ zap.Error(err),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
@@ -63,6 +65,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
log := p.log.With(
zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])),
zap.Stringer("object", task.Addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)
@@ -72,11 +75,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
cancel()
if err != nil {
- log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
- zap.Error(err),
+ log.Error(logs.ReplicatorCouldNotReplicateObject,
+ zap.String("error", err.Error()),
)
} else {
- log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
+ log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
task.NumCopies--
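With the context-aware logger gone, this file (and pull.go/put.go below) attach the trace ID to log entries explicitly. A sketch of the pattern, assuming tracingPkg.GetTraceID returns the active span's trace ID and an empty string when the context carries none:

    log := p.log.With(
        zap.Stringer("object", task.Addr),
        zap.String("trace_id", tracingPkg.GetTraceID(ctx)), // bound once, reused below
    )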
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index 216fe4919..d178700f6 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -3,12 +3,11 @@ package replicator
import (
"context"
"errors"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
@@ -22,7 +21,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask",
@@ -43,24 +42,31 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
if err == nil {
break
}
- endpoints := slices.Collect(node.NetworkEndpoints())
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ var endpoints []string
+ node.IterateNetworkEndpoints(func(s string) bool {
+ endpoints = append(endpoints, s)
+ return false
+ })
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
zap.Error(err),
- zap.Strings("endpoints", endpoints))
+ zap.Strings("endpoints", endpoints),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if obj == nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errFailedToGetObjectFromAnyNode))
+ zap.Error(errFailedToGetObjectFromAnyNode),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
- err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
+ err := engine.Put(ctx, p.localStorage, obj)
if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err))
+ zap.Error(err),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
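The endpoint collection above swaps slices.Collect over an iterator for the SDK's callback-based IterateNetworkEndpoints, whose boolean return controls early exit: returning false, as above, visits every endpoint. A sketch that stops at the first one instead:

    var first string
    node.IterateNetworkEndpoints(func(s string) bool {
        first = s
        return true // true stops the iteration
    })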
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index bcad8471d..c06ec3f65 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -5,8 +5,8 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -19,7 +19,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
}()
ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask",
@@ -30,16 +30,18 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
defer span.End()
if task.Obj == nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(errObjectNotDefined))
+ zap.Error(errObjectNotDefined),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
- err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
+ err := engine.Put(ctx, p.localStorage, task.Obj)
if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err))
+ zap.Error(err),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index a940cef37..f2f86daf0 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -7,6 +7,7 @@ import (
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
// Replicator represents the utility that replicates
@@ -44,6 +45,8 @@ func New(opts ...Option) *Replicator {
opts[i](c)
}
+ c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))}
+
return &Replicator{
cfg: c,
}
diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go
index a03f8dcaa..d2b5b2506 100644
--- a/pkg/services/replicator/task.go
+++ b/pkg/services/replicator/task.go
@@ -1,7 +1,6 @@
package replicator
import (
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -17,6 +16,4 @@ type Task struct {
Obj *objectSDK.Object
// Nodes is a list of potential object holders.
Nodes []netmap.NodeInfo
-
- Container containerSDK.Container
}
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index f0591de71..76c220fab 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"go.uber.org/zap"
)
@@ -33,7 +33,10 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create"))
+ s.log.Debug(logs.ServingRequest,
+ zap.String("component", "SessionService"),
+ zap.String("request", "Create"),
+ )
respBody, err := s.exec.Create(ctx, req.GetBody())
if err != nil {
diff --git a/pkg/services/session/server.go b/pkg/services/session/server.go
index e8555a7c9..9e44ae667 100644
--- a/pkg/services/session/server.go
+++ b/pkg/services/session/server.go
@@ -3,7 +3,7 @@ package session
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
)
// Server is an interface of the FrostFS API Session service server.
diff --git a/pkg/services/session/sign.go b/pkg/services/session/sign.go
index 3664c1403..690fff896 100644
--- a/pkg/services/session/sign.go
+++ b/pkg/services/session/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type signService struct {
diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go
index ea0233f9a..21f55a7d1 100644
--- a/pkg/services/session/storage/persistent/executor.go
+++ b/pkg/services/session/storage/persistent/executor.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.etcd.io/bbolt"
diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go
index f80ecb591..124d36930 100644
--- a/pkg/services/session/storage/persistent/executor_test.go
+++ b/pkg/services/session/storage/persistent/executor_test.go
@@ -8,8 +8,8 @@ import (
"path/filepath"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go
index 60db97f90..411734ea1 100644
--- a/pkg/services/session/storage/persistent/options.go
+++ b/pkg/services/session/storage/persistent/options.go
@@ -19,7 +19,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- l: logger.NewLoggerWrapper(zap.L()),
+ l: &logger.Logger{Logger: zap.L()},
timeout: 100 * time.Millisecond,
}
}
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index 132d62445..71711e371 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -1,7 +1,6 @@
package persistent
import (
- "context"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
@@ -64,7 +63,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
// enable encryption if it
// was configured so
if cfg.privateKey != nil {
- rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
+ rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
cfg.privateKey.D.FillBytes(rawKey)
c, err := aes.NewCipher(rawKey)
@@ -106,7 +105,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
return err
})
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage,
+ s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
zap.Error(err),
zap.Stringer("ownerID", ownerID),
zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -131,7 +130,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
if epochFromToken(v) <= epoch {
err = c.Delete()
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken,
+ s.l.Error(logs.PersistentCouldNotDeleteSToken,
zap.String("token_id", hex.EncodeToString(k)),
)
}
@@ -142,7 +141,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
})
})
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens,
+ s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
zap.Uint64("epoch", epoch),
)
}
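The key-derivation change above reaches the curve order through the embedded Curve field; both forms compute the same byte width. D.FillBytes then left-pads the scalar to that width, which matters because aes.NewCipher picks the cipher variant from the key length. A sketch under standard crypto/ecdsa semantics:

    // For P-256, N.BitLen() == 256, so rawKey is 32 bytes and aes.NewCipher
    // yields AES-256; FillBytes zero-pads D on the left to the full width.
    rawKey := make([]byte, (priv.Curve.Params().N.BitLen()+7)/8)
    priv.D.FillBytes(rawKey)
    block, err := aes.NewCipher(rawKey)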
diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go
index 423e579d7..cd498709c 100644
--- a/pkg/services/session/storage/temporary/executor.go
+++ b/pkg/services/session/storage/temporary/executor.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody)
s.mtx.Lock()
s.tokens[key{
tokenID: base58.Encode(uidBytes),
- ownerID: id.EncodeToString(),
+ ownerID: base58.Encode(id.WalletBytes()),
}] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration())
s.mtx.Unlock()
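Create (above) and Get (in storage.go below) must build identical map keys, so the owner encoding changes on both sides at once: base58 over the owner's wallet bytes instead of user.ID.EncodeToString. The shared key construction:

    k := key{
        tokenID: base58.Encode(tokenID),               // tokenID is the raw []byte
        ownerID: base58.Encode(ownerID.WalletBytes()), // ownerID is a user.ID
    }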
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index c9da6b842..ee93dee71 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -9,9 +9,7 @@ import (
)
type key struct {
- // nolint:unused
tokenID string
- // nolint:unused
ownerID string
}
@@ -41,7 +39,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken
s.mtx.RLock()
t := s.tokens[key{
tokenID: base58.Encode(tokenID),
- ownerID: ownerID.EncodeToString(),
+ ownerID: base58.Encode(ownerID.WalletBytes()),
}]
s.mtx.RUnlock()
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index 58757ff6d..693b16e60 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -22,7 +22,7 @@ import (
)
func (s *Service) newAPERequest(ctx context.Context, namespace string,
- cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) (aperequest.Request, error) {
schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
if err != nil {
@@ -36,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()),
nativeschema.PropertyKeyActorRole: schemaRole,
}
- reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey)
+ reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey)
if err != nil {
return aperequest.Request{}, err
}
@@ -53,19 +53,15 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
}
- resProps := map[string]string{
- nativeschema.ProperyKeyTreeID: treeID,
- }
-
return aperequest.NewRequest(
schemaMethod,
- aperequest.NewResource(resourceName, resProps),
+ aperequest.NewResource(resourceName, make(map[string]string)),
reqProps,
), nil
}
func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
- container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+ container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) error {
namespace := ""
cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
@@ -73,27 +69,27 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
namespace = cntNamespace
}
- request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey)
+ request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey)
if err != nil {
return fmt.Errorf("failed to create ape request: %w", err)
}
- return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{
- Request: request,
- Namespace: namespace,
- Container: cid,
- ContainerOwner: container.Value.Owner(),
- PublicKey: publicKey,
- BearerToken: bt,
+ return s.apeChecker.CheckAPE(checkercore.CheckPrm{
+ Request: request,
+ Namespace: namespace,
+ Container: cid,
+ PublicKey: publicKey,
+ BearerToken: bt,
+ SoftAPECheck: false,
})
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
+func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
if reqProps == nil {
reqProps = make(map[string]string)
}
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey)
+ props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey)
if err != nil {
return reqProps, err
}
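With the treeID parameter removed, newAPERequest attaches an empty property map to the resource, so a chain rule conditioned on the tree ID can no longer match. The condition below, taken from the test deleted next, is exactly the kind this change disables:

    cond := chain.Condition{
        Op:    chain.CondStringEquals,
        Kind:  chain.KindResource,
        Key:   nativeschema.ProperyKeyTreeID, // spelling as in the policy-engine schema
        Value: versionTreeID,
    }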
diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go
deleted file mode 100644
index 7b209fd47..000000000
--- a/pkg/services/tree/ape_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package tree
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/stretchr/testify/require"
-)
-
-var (
- containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy"
-
- senderPrivateKey, _ = keys.NewPrivateKey()
-
- senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
-
- rootCnr = &core.Container{Value: containerSDK.Container{}}
-)
-
-type frostfsIDProviderMock struct {
- subjects map[util.Uint160]*client.Subject
- subjectsExtended map[util.Uint160]*client.SubjectExtended
-}
-
-func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
- v, ok := f.subjects[key]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return v, nil
-}
-
-func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
- v, ok := f.subjectsExtended[key]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return v, nil
-}
-
-var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil)
-
-func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock {
- return &frostfsIDProviderMock{
- subjects: map[util.Uint160]*client.Subject{
- scriptHashFromSenderKey(t, senderKey): {
- Namespace: "testnamespace",
- Name: "test",
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExtended: map[util.Uint160]*client.SubjectExtended{
- scriptHashFromSenderKey(t, senderKey): {
- Namespace: "testnamespace",
- Name: "test",
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 1,
- Name: "test",
- Namespace: "testnamespace",
- KV: map[string]string{
- "attr1": "value1",
- "attr2": "value2",
- },
- },
- },
- },
- },
- }
-}
-
-func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
- pk, err := keys.NewPublicKeyFromString(senderKey)
- require.NoError(t, err)
- return pk.GetScriptHash()
-}
-
-type stMock struct{}
-
-func (m *stMock) CurrentEpoch() uint64 {
- return 8
-}
-
-func TestCheckAPE(t *testing.T) {
- cid := cid.ID{}
- _ = cid.DecodeString(containerID)
-
- t.Run("treeID rule", func(t *testing.T) {
- los := inmemory.NewInmemoryLocalStorage()
- mcs := inmemory.NewInmemoryMorphRuleChainStorage()
- fid := newFrostfsIDProviderMock(t)
- s := Service{
- cfg: cfg{
- frostfsidSubjectProvider: fid,
- },
- apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
- }
-
- mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.QuotaLimitReached,
- Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindResource,
- Key: nativeschema.ProperyKeyTreeID,
- Value: versionTreeID,
- },
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey())
-
- var chErr *checkercore.ChainRouterError
- require.ErrorAs(t, err, &chErr)
- require.Equal(t, chain.QuotaLimitReached, chErr.Status())
- })
-
- t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
- los := inmemory.NewInmemoryLocalStorage()
- mcs := inmemory.NewInmemoryMorphRuleChainStorage()
- fid := newFrostfsIDProviderMock(t)
- s := Service{
- cfg: cfg{
- frostfsidSubjectProvider: fid,
- },
- apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
- }
-
- los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringNotEquals,
- Kind: chain.KindResource,
- Key: nativeschema.PropertyKeyObjectType,
- Value: "TOMBSTONE",
- },
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
- require.NoError(t, err)
- })
-
- t.Run("delete rule won't affect tree add", func(t *testing.T) {
- los := inmemory.NewInmemoryLocalStorage()
- mcs := inmemory.NewInmemoryMorphRuleChainStorage()
- fid := newFrostfsIDProviderMock(t)
- s := Service{
- cfg: cfg{
- frostfsidSubjectProvider: fid,
- },
- apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
- }
-
- los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringNotEquals,
- Kind: chain.KindResource,
- Key: nativeschema.PropertyKeyObjectType,
- Value: "TOMBSTONE",
- },
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
- require.NoError(t, err)
- })
-}
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index a11700771..38501b852 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -8,18 +8,19 @@ import (
"sync"
"time"
- internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
+ tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials/insecure"
)
type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
key *ecdsa.PrivateKey
- ds *internalNet.DialerSource
}
type cacheItem struct {
@@ -35,7 +36,7 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
+func (c *clientCache) init(pk *ecdsa.PrivateKey) {
l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
if conn := value.cc; conn != nil {
_ = conn.Close()
@@ -43,12 +44,11 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
})
c.LRU = *l
c.key = pk
- c.ds = ds
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
c.Lock()
- ccInt, ok := c.Get(netmapAddr)
+ ccInt, ok := c.LRU.Get(netmapAddr)
c.Unlock()
if ok {
@@ -66,19 +66,14 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- var netAddr network.Address
- if err := netAddr.FromString(netmapAddr); err != nil {
- return nil, err
- }
-
- cc, err := dialTreeService(ctx, netAddr, c.key, c.ds)
+ cc, err := c.dialTreeService(ctx, netmapAddr)
lastTry := time.Now()
c.Lock()
if err != nil {
- c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+ c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
} else {
- c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+ c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
}
c.Unlock()
@@ -88,3 +83,46 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
+
+func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
+ var netAddr network.Address
+ if err := netAddr.FromString(netmapAddr); err != nil {
+ return nil, err
+ }
+
+ opts := []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ ),
+ }
+
+ if !netAddr.IsTLSEnabled() {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ req := &HealthcheckRequest{
+ Body: &HealthcheckRequest_Body{},
+ }
+ if err := SignMessage(req, c.key); err != nil {
+ return nil, err
+ }
+
+ cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ defer cancel()
+ // verify the connection with a signed healthcheck request before returning it
+ if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
+ _ = cc.Close()
+ return nil, err
+ }
+ return cc, nil
+}
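The new `dialTreeService` above probes the endpoint with a signed `Healthcheck` before the connection enters the cache, because `grpc.NewClient` creates the client lazily and does not connect eagerly. A minimal standalone sketch of the same dial-then-probe pattern, using the generic gRPC health service instead of the TreeService types (assumes a grpc-go version recent enough to provide `grpc.NewClient`):

```go
package example

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// dialWithProbe mirrors the dial-then-probe shape of dialTreeService.
func dialWithProbe(ctx context.Context, addr string) (*grpc.ClientConn, error) {
	cc, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	// grpc.NewClient does not connect eagerly, so issue one RPC to verify
	// the endpoint is reachable before the connection is handed out.
	if _, err := healthpb.NewHealthClient(cc).Check(ctx, &healthpb.HealthCheckRequest{}); err != nil {
		_ = cc.Close()
		return nil, err
	}
	return cc, nil
}
```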
diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go
index c641a21a2..435257550 100644
--- a/pkg/services/tree/container.go
+++ b/pkg/services/tree/container.go
@@ -2,7 +2,6 @@ package tree
import (
"bytes"
- "context"
"crypto/sha256"
"fmt"
"sync"
@@ -33,13 +32,13 @@ type containerCacheItem struct {
const defaultContainerCacheSize = 10
// getContainerNodes returns nodes in the container and a position of local key in the list.
-func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
- nm, err := s.nmSource.GetNetMap(ctx, 0)
+func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
+ nm, err := s.nmSource.GetNetMap(0)
if err != nil {
return nil, -1, fmt.Errorf("can't get netmap: %w", err)
}
- cnr, err := s.cnrSource.Get(ctx, cid)
+ cnr, err := s.cnrSource.Get(cid)
if err != nil {
return nil, -1, fmt.Errorf("can't get container: %w", err)
}
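`getContainerNodes` ultimately reports the flattened placement vector together with the index of the local node (`-1` when this node does not belong to the container). A self-contained sketch of that position computation; `NodeInfo` here is a stand-in for `netmapSDK.NodeInfo`, not the real type:

```go
package example

import "bytes"

// NodeInfo is a stand-in for netmapSDK.NodeInfo (assumption for the sketch).
type NodeInfo struct{ pub []byte }

func (n NodeInfo) PublicKey() []byte { return n.pub }

// localPosition flattens placement vectors and reports the index of the
// local node, or -1 when this node is not in the container.
func localPosition(vectors [][]NodeInfo, localPub []byte) ([]NodeInfo, int) {
	var flat []NodeInfo
	pos := -1
	for _, v := range vectors {
		for _, n := range v {
			if pos < 0 && bytes.Equal(n.PublicKey(), localPub) {
				pos = len(flat)
			}
			flat = append(flat, n)
		}
	}
	return flat, pos
}
```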
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index e7a13827e..95bdda34b 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -131,7 +131,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) {
t.Run("boltdb forest", func(t *testing.T) {
p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
require.NoError(t, p.Open(context.Background(), 0o644))
- require.NoError(t, p.Init(context.Background()))
+ require.NoError(t, p.Init())
testGetSubTreeOrderAsc(t, p)
})
}
diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go
index 07503f8c3..0f0e4ee57 100644
--- a/pkg/services/tree/metrics.go
+++ b/pkg/services/tree/metrics.go
@@ -6,7 +6,6 @@ type MetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
- AddOperation(string, string)
}
type defaultMetricsRegister struct{}
@@ -14,4 +13,3 @@ type defaultMetricsRegister struct{}
func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {}
-func (defaultMetricsRegister) AddOperation(string, string) {}
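With `AddOperation` gone, `MetricsRegister` is back to three duration hooks, and `defaultMetricsRegister` keeps them as no-ops. A possible non-trivial implementation backed by a Prometheus histogram, shown for one method only (the other two follow the same pattern); the metric name and label are illustrative, not what frostfs-node actually registers:

```go
package example

import (
	"strconv"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

type promMetrics struct {
	replicateTask *prometheus.HistogramVec
}

func newPromMetrics() *promMetrics {
	m := &promMetrics{
		replicateTask: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Name: "tree_replicate_task_duration_seconds", // hypothetical metric name
			Help: "Duration of tree replication tasks.",
		}, []string{"success"}),
	}
	prometheus.MustRegister(m.replicateTask)
	return m
}

// AddReplicateTaskDuration records one replication attempt, labeled by outcome.
func (m *promMetrics) AddReplicateTaskDuration(d time.Duration, success bool) {
	m.replicateTask.WithLabelValues(strconv.FormatBool(success)).Observe(d.Seconds())
}
```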
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 56cbcc081..6a20fe5cc 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -1,12 +1,9 @@
package tree
import (
- "context"
"crypto/ecdsa"
- "sync/atomic"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -20,12 +17,12 @@ import (
type ContainerSource interface {
container.Source
- DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error)
+ DeletionInfo(cid.ID) (*container.DelInfo, error)
// List must return list of all the containers in the FrostFS network
// at the moment of a call and any error that does not allow fetching
// container information.
- List(ctx context.Context) ([]cid.ID, error)
+ List() ([]cid.ID, error)
}
type cfg struct {
@@ -36,20 +33,19 @@ type cfg struct {
nmSource netmap.Source
cnrSource ContainerSource
frostfsidSubjectProvider frostfsidcore.SubjectProvider
+ eaclSource container.EACLSource
forest pilorama.Forest
// replication-related parameters
replicatorChannelCapacity int
replicatorWorkerCount int
replicatorTimeout time.Duration
containerCacheSize int
- authorizedKeys atomic.Pointer[[][]byte]
- syncBatchSize int
+ authorizedKeys [][]byte
localOverrideStorage policyengine.LocalOverrideStorage
morphChainStorage policyengine.MorphRuleChainStorageReader
metrics MetricsRegister
- ds *net.DialerSource
}
// Option represents configuration option for a tree service.
@@ -69,6 +65,14 @@ func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option
}
}
+// WithEACLSource sets an eACL table source for a tree service.
+// This option is required.
+func WithEACLSource(src container.EACLSource) Option {
+ return func(c *cfg) {
+ c.eaclSource = src
+ }
+}
+
// WithNetmapSource sets a netmap source for a tree service.
// This option is required.
func WithNetmapSource(src netmap.Source) Option {
@@ -116,12 +120,6 @@ func WithReplicationWorkerCount(n int) Option {
}
}
-func WithSyncBatchSize(n int) Option {
- return func(c *cfg) {
- c.syncBatchSize = n
- }
-}
-
func WithContainerCacheSize(n int) Option {
return func(c *cfg) {
if n > 0 {
@@ -148,7 +146,10 @@ func WithMetrics(v MetricsRegister) Option {
// keys that have rights to use Tree service.
func WithAuthorizedKeys(keys keys.PublicKeys) Option {
return func(c *cfg) {
- c.authorizedKeys.Store(fromPublicKeys(keys))
+ c.authorizedKeys = nil
+ for _, key := range keys {
+ c.authorizedKeys = append(c.authorizedKeys, key.Bytes())
+ }
}
}
@@ -169,9 +170,3 @@ func WithNetmapState(state netmap.State) Option {
c.state = state
}
}
-
-func WithDialerSource(ds *net.DialerSource) Option {
- return func(c *cfg) {
- c.ds = ds
- }
-}
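options.go follows the functional-options pattern throughout: each `With*` helper returns a closure that mutates `cfg`, and `New` applies them over defaults. A compact generic demonstration of the idiom (names here are invented, not the tree package's):

```go
package main

import "fmt"

type cfg struct{ workers int }

// Option mutates the configuration, exactly as in options.go.
type Option func(*cfg)

func WithWorkers(n int) Option { return func(c *cfg) { c.workers = n } }

func New(opts ...Option) *cfg {
	c := &cfg{workers: 64} // default, mirroring defaultReplicatorWorkerCount
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	fmt.Println(New(WithWorkers(32)).workers) // 32
}
```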
diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go
deleted file mode 100644
index 8f21686df..000000000
--- a/pkg/services/tree/qos.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package tree
-
-import (
- "context"
-
- "google.golang.org/grpc"
-)
-
-var _ TreeServiceServer = (*ioTagAdjust)(nil)
-
-type AdjustIOTag interface {
- AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
-}
-
-type ioTagAdjust struct {
- s TreeServiceServer
- a AdjustIOTag
-}
-
-func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer {
- return &ioTagAdjust{
- s: s,
- a: a,
- }
-}
-
-func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Add(ctx, req)
-}
-
-func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.AddByPath(ctx, req)
-}
-
-func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Apply(ctx, req)
-}
-
-func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.GetNodeByPath(ctx, req)
-}
-
-func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
- ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
- return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{
- sender: srv,
- ServerStream: srv,
- ctxF: func() context.Context { return ctx },
- })
-}
-
-func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
- ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
- return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{
- sender: srv,
- ServerStream: srv,
- ctxF: func() context.Context { return ctx },
- })
-}
-
-func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Healthcheck(ctx, req)
-}
-
-func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Move(ctx, req)
-}
-
-func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Remove(ctx, req)
-}
-
-func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.TreeList(ctx, req)
-}
-
-type qosSend[T any] interface {
- Send(T) error
-}
-
-type qosServerWrapper[T any] struct {
- grpc.ServerStream
- sender qosSend[T]
- ctxF func() context.Context
-}
-
-func (w *qosServerWrapper[T]) Send(resp T) error {
- return w.sender.Send(resp)
-}
-
-func (w *qosServerWrapper[T]) Context() context.Context {
- return w.ctxF()
-}
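The deleted qos.go relied on one gRPC trick worth noting: `grpc.ServerStream` exposes no way to replace its context, so the wrapper embeds the stream and overrides `Context()`. A generic sketch of that pattern, detached from the IO-tag machinery:

```go
package example

import (
	"context"

	"google.golang.org/grpc"
)

// wrappedStream embeds grpc.ServerStream so every method is inherited,
// then shadows Context() with an enriched one.
type wrappedStream struct {
	grpc.ServerStream
	ctx context.Context
}

func (w *wrappedStream) Context() context.Context { return w.ctx }

// wrap returns a stream whose Context() carries an extra value.
func wrap(ss grpc.ServerStream, key, val any) grpc.ServerStream {
	return &wrappedStream{
		ServerStream: ss,
		ctx:          context.WithValue(ss.Context(), key, val),
	}
}
```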
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 647f8cb30..ec41a60d4 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -6,32 +6,19 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
- "google.golang.org/grpc"
)
var errNoSuitableNode = errors.New("no node was found to execute the request")
-func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) {
- var resp *Resp
- var outErr error
- err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool {
- resp, outErr = callback(c, fCtx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
-}
-
// forEachNode executes callback for each node in the container until true is returned.
// Returns errNoSuitableNode if there was no successful attempt to dial any node.
-func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error {
+func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error {
for _, n := range cntNodes {
if bytes.Equal(n.PublicKey(), s.rawPub) {
return nil
@@ -41,15 +28,25 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
var called bool
for _, n := range cntNodes {
var stop bool
- for endpoint := range n.NetworkEndpoints() {
- stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool {
- called = true
- return f(fCtx, c)
- })
- if called {
- break
+ n.IterateNetworkEndpoints(func(endpoint string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
+ trace.WithAttributes(
+ attribute.String("endpoint", endpoint),
+ ))
+ defer span.End()
+
+ c, err := s.cache.get(ctx, endpoint)
+ if err != nil {
+ return false
}
- }
+
+ s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+
+ called = true
+ stop = f(c)
+ return true
+ })
if stop {
return nil
}
@@ -59,19 +56,3 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
}
return nil
}
-
-func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
- trace.WithAttributes(
- attribute.String("endpoint", endpoint),
- ))
- defer span.End()
-
- c, err := s.cache.get(ctx, endpoint)
- if err != nil {
- return false
- }
-
- s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
- return f(ctx, c)
-}
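`forEachNode` switches from Go 1.23 range-over-func iteration (`for endpoint := range n.NetworkEndpoints()`) to the callback-style `IterateNetworkEndpoints`, where returning `true` from the callback stops the iteration. A toy model of those semantics:

```go
package main

import "fmt"

// iterateEndpoints mimics IterateNetworkEndpoints: the callback returning
// true breaks the iteration.
func iterateEndpoints(eps []string, f func(string) bool) {
	for _, e := range eps {
		if f(e) {
			return
		}
	}
}

func main() {
	iterateEndpoints([]string{"a:8080", "b:8080"}, func(e string) bool {
		fmt.Println("try", e)
		return e == "a:8080" // stop after the first successful endpoint
	})
}
```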
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index ee40884eb..95c8f8013 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -39,7 +40,6 @@ const (
defaultReplicatorCapacity = 64
defaultReplicatorWorkerCount = 64
defaultReplicatorSendTimeout = time.Second * 5
- defaultSyncBatchSize = 1000
)
func (s *Service) localReplicationWorker(ctx context.Context) {
@@ -57,8 +57,8 @@ func (s *Service) localReplicationWorker(ctx context.Context) {
err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
if err != nil {
- s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
- zap.Error(err))
+ s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
+ zap.String("err", err.Error()))
}
span.End()
}
@@ -89,23 +89,41 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
var lastErr error
var lastAddr string
- for addr := range n.NetworkEndpoints() {
+ n.IterateNetworkEndpoints(func(addr string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
+ attribute.String("address", addr),
+ ),
+ )
+ defer span.End()
+
lastAddr = addr
- lastErr = s.apply(ctx, n, addr, req)
- if lastErr == nil {
- break
+
+ c, err := s.cache.get(ctx, addr)
+ if err != nil {
+ lastErr = fmt.Errorf("can't create client: %w", err)
+ return false
}
- }
+
+ ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
+ _, lastErr = c.Apply(ctx, req)
+ cancel()
+
+ return lastErr == nil
+ })
if lastErr != nil {
if errors.Is(lastErr, errRecentlyFailed) {
- s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode,
- zap.String("last_error", lastErr.Error()))
+ s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
} else {
- s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode,
+ s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
zap.String("last_error", lastErr.Error()),
zap.String("address", lastAddr),
- zap.String("key", hex.EncodeToString(n.PublicKey())))
+ zap.String("key", hex.EncodeToString(n.PublicKey())),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
s.metrics.AddReplicateTaskDuration(time.Since(start), false)
return lastErr
@@ -114,26 +132,6 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
return nil
}
-func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
- trace.WithAttributes(
- attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
- attribute.String("address", addr),
- ),
- )
- defer span.End()
-
- c, err := s.cache.get(ctx, addr)
- if err != nil {
- return fmt.Errorf("can't create client: %w", err)
- }
-
- ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
- _, err = c.Apply(ctx, req)
- cancel()
- return err
-}
-
func (s *Service) replicateLoop(ctx context.Context) {
for range s.replicatorWorkerCount {
go s.replicationWorker(ctx)
@@ -153,10 +151,10 @@ func (s *Service) replicateLoop(ctx context.Context) {
return
case op := <-s.replicateCh:
start := time.Now()
- err := s.replicate(ctx, op)
+ err := s.replicate(op)
if err != nil {
- s.log.Error(ctx, logs.TreeErrorDuringReplication,
- zap.Error(err),
+ s.log.Error(logs.TreeErrorDuringReplication,
+ zap.String("err", err.Error()),
zap.Stringer("cid", op.cid),
zap.String("treeID", op.treeID))
}
@@ -165,14 +163,14 @@ func (s *Service) replicateLoop(ctx context.Context) {
}
}
-func (s *Service) replicate(ctx context.Context, op movePair) error {
+func (s *Service) replicate(op movePair) error {
req := newApplyRequest(&op)
err := SignMessage(req, s.key)
if err != nil {
return fmt.Errorf("can't sign data: %w", err)
}
- nodes, localIndex, err := s.getContainerNodes(ctx, op.cid)
+ nodes, localIndex, err := s.getContainerNodes(op.cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -206,7 +204,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
TreeId: op.treeID,
Operation: &LogMove{
ParentId: op.op.Parent,
- Meta: op.op.Bytes(),
+ Meta: op.op.Meta.Bytes(),
ChildId: op.op.Child,
},
},
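`ReplicateTreeOp` now inlines the per-endpoint attempt: each `Apply` call gets its own `context.WithTimeout(ctx, s.replicatorTimeout)` so a single slow peer cannot stall the task, and `lastErr` keeps the most recent failure. The retry skeleton in isolation:

```go
package example

import (
	"context"
	"errors"
	"time"
)

// tryEndpoints attempts call on each endpoint under a bounded context,
// stopping at the first success and returning the last error otherwise.
func tryEndpoints(ctx context.Context, eps []string, timeout time.Duration,
	call func(context.Context, string) error,
) error {
	lastErr := errors.New("no endpoints")
	for _, ep := range eps {
		attemptCtx, cancel := context.WithTimeout(ctx, timeout)
		lastErr = call(attemptCtx, ep)
		cancel() // release the attempt's timer before the next try
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}
```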
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 3994d6973..60bb1a6ad 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -5,19 +5,16 @@ import (
"context"
"errors"
"fmt"
- "slices"
+ "sort"
"sync"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
@@ -58,19 +55,17 @@ func New(opts ...Option) *Service {
s.replicatorChannelCapacity = defaultReplicatorCapacity
s.replicatorWorkerCount = defaultReplicatorWorkerCount
s.replicatorTimeout = defaultReplicatorSendTimeout
- s.syncBatchSize = defaultSyncBatchSize
s.metrics = defaultMetricsRegister{}
- s.authorizedKeys.Store(&[][]byte{})
for i := range opts {
opts[i](&s.cfg)
}
if s.log == nil {
- s.log = logger.NewLoggerWrapper(zap.NewNop())
+ s.log = &logger.Logger{Logger: zap.NewNop()}
}
- s.cache.init(s.key, s.ds)
+ s.cache.init(s.key)
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
@@ -87,7 +82,6 @@ func New(opts ...Option) *Service {
// Start starts the service.
func (s *Service) Start(ctx context.Context) {
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
go s.replicateLoop(ctx)
go s.syncLoop(ctx)
@@ -107,7 +101,6 @@ func (s *Service) Shutdown() {
}
func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
- defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -119,17 +112,26 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).Add)
+ var resp *AddResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.Add(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
@@ -151,7 +153,6 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
}
func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
- defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -163,17 +164,26 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).AddByPath)
+ var resp *AddByPathResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.AddByPath(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
meta := protoToMeta(b.GetMeta())
@@ -207,7 +217,6 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
- defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -219,17 +228,26 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).Remove)
+ var resp *RemoveResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.Remove(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
if b.GetNodeId() == pilorama.RootID {
@@ -252,7 +270,6 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
// Move applies client operation to the specified tree and pushes in queue
// for replication on other nodes.
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
- defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -264,17 +281,26 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).Move)
+ var resp *MoveResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.Move(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
if b.GetNodeId() == pilorama.RootID {
@@ -296,7 +322,6 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
}
func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
- defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -308,17 +333,26 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).GetNodeByPath)
+ var resp *GetNodeByPathResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.GetNodeByPath(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
attr := b.GetPathAttribute()
@@ -347,11 +381,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
} else {
var metaValue []KeyValue
for _, kv := range m.Items {
- if slices.Contains(b.GetAttributes(), kv.Key) {
- metaValue = append(metaValue, KeyValue{
- Key: kv.Key,
- Value: kv.Value,
- })
+ for _, attr := range b.GetAttributes() {
+ if kv.Key == attr {
+ metaValue = append(metaValue, KeyValue{
+ Key: kv.Key,
+ Value: kv.Value,
+ })
+ break
+ }
}
}
x.Meta = metaValue
@@ -367,7 +404,6 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
- defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -379,20 +415,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return err
}
- err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return err
}
- ns, pos, err := s.getContainerNodes(srv.Context(), cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetSubTreeClient
var outErr error
- err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
- cli, outErr = c.GetSubTree(fCtx, req)
+ err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
+ cli, outErr = c.GetSubTree(srv.Context(), req)
return true
})
if err != nil {
@@ -414,7 +450,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
type stackItem struct {
values []pilorama.MultiNodeInfo
parent pilorama.MultiNode
- last *pilorama.Cursor
+ last *string
}
func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
@@ -438,8 +474,10 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
}
if ms == nil {
ms = m.Items
- } else if len(m.Items) != 1 {
- return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
+ } else {
+ if len(m.Items) != 1 {
+ return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
+ }
}
ts = append(ts, m.Time)
ps = append(ps, p)
@@ -463,13 +501,14 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
break
}
- var err error
- item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
+ nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
if err != nil {
return err
}
+ item.values = nodes
+ item.last = last
- if len(item.values) == 0 {
+ if len(nodes) == 0 {
stack = stack[:len(stack)-1]
continue
}
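The loop above pages through each stack item with a cursor: fetch up to `batchSize` children, remember where the batch ended, and pop the item once a fetch comes back empty. The paging skeleton, with illustrative types:

```go
package main

import "fmt"

// page returns the next batch starting at cursor and the advanced cursor.
func page(all []string, cursor, batch int) ([]string, int) {
	end := cursor + batch
	if end > len(all) {
		end = len(all)
	}
	return all[cursor:end], end
}

func main() {
	items := []string{"a", "b", "c", "d", "e"}
	cursor := 0
	for {
		vals, next := page(items, cursor, 2)
		if len(vals) == 0 {
			break // empty batch: this node is exhausted, pop it
		}
		fmt.Println(vals)
		cursor = next
	}
}
```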
@@ -581,9 +620,10 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
if len(nodes) == 0 {
return nodes, nil
}
- slices.SortFunc(nodes, func(a, b pilorama.NodeInfo) int {
- return bytes.Compare(a.Meta.GetAttr(pilorama.AttributeFilename), b.Meta.GetAttr(pilorama.AttributeFilename))
- })
+ less := func(i, j int) bool {
+ return bytes.Compare(nodes[i].Meta.GetAttr(pilorama.AttributeFilename), nodes[j].Meta.GetAttr(pilorama.AttributeFilename)) < 0
+ }
+ sort.Slice(nodes, less)
return nodes, nil
default:
return nil, fmt.Errorf("unsupported order direction: %s", d.String())
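The replacement swaps `slices.SortFunc` for the pre-generics `sort.Slice`; both produce the same ascending bytewise order on the filename attribute. Equivalent behavior in miniature:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

type node struct{ filename []byte }

func main() {
	nodes := []node{{[]byte("b")}, {[]byte("a")}}
	// Ascending bytewise order, as in sortByFilename above.
	sort.Slice(nodes, func(i, j int) bool {
		return bytes.Compare(nodes[i].filename, nodes[j].filename) < 0
	})
	fmt.Println(string(nodes[0].filename)) // a
}
```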
@@ -591,8 +631,7 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
}
// Apply locally applies operation from the remote node to the tree.
-func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
- defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx))
+func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) {
err := verifyMessage(req)
if err != nil {
return nil, err
@@ -605,7 +644,7 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse,
key := req.GetSignature().GetKey()
- _, pos, _, err := s.getContainerInfo(ctx, cid, key)
+ _, pos, _, err := s.getContainerInfo(cid, key)
if err != nil {
return nil, err
}
@@ -636,7 +675,6 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse,
}
func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
- defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -648,15 +686,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
return err
}
- ns, pos, err := s.getContainerNodes(srv.Context(), cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetOpLogClient
var outErr error
- err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
- cli, outErr = c.GetOpLog(fCtx, req)
+ err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
+ cli, outErr = c.GetOpLog(srv.Context(), req)
return true
})
if err != nil {
@@ -687,7 +725,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
Body: &GetOpLogResponse_Body{
Operation: &LogMove{
ParentId: lm.Parent,
- Meta: lm.Bytes(),
+ Meta: lm.Meta.Bytes(),
ChildId: lm.Child,
},
},
@@ -701,7 +739,6 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
}
func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
- defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -721,12 +758,21 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).TreeList)
+ var resp *TreeListResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(ctx, req)
+ return outErr == nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
ids, err := s.forest.TreeList(ctx, cid)
@@ -763,8 +809,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue {
// getContainerInfo returns the list of container nodes, position in the container for the node
// with pub key and total amount of nodes in all replicas.
-func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
- cntNodes, _, err := s.getContainerNodes(ctx, cid)
+func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
+ cntNodes, _, err := s.getContainerNodes(cid)
if err != nil {
return nil, 0, 0, err
}
@@ -784,15 +830,3 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec
return new(HealthcheckResponse), nil
}
-
-func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) {
- s.authorizedKeys.Store(fromPublicKeys(newKeys))
-}
-
-func fromPublicKeys(keys keys.PublicKeys) *[][]byte {
- buff := make([][]byte, len(keys))
- for i, k := range keys {
- buff[i] = k.Bytes()
- }
- return &buff
-}
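The removed `ReloadAuthorizedKeys`/`fromPublicKeys` pair existed so authorized keys could be hot-swapped at runtime: `atomic.Pointer` lets readers grab a consistent snapshot without locks, which the plain `[][]byte` field reintroduced above cannot offer. The idiom in isolation:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type keySet struct {
	keys atomic.Pointer[[][]byte]
}

// Store atomically publishes a new key slice for all readers.
func (k *keySet) Store(keys [][]byte) { k.keys.Store(&keys) }

// Load returns the current snapshot; concurrent Store never tears it.
func (k *keySet) Load() [][]byte { return *k.keys.Load() }

func main() {
	var ks keySet
	ks.Store([][]byte{{0x01}})
	fmt.Println(len(ks.Load())) // 1
}
```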
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index 88d002621..7b6abb1dd 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -5,9 +5,9 @@ package tree
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -181,51 +181,24 @@ func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
+ const prefix string = ",\"parentId\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
+ out.Uint64(x.ParentId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
+ const prefix string = ",\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -237,18 +210,9 @@ func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
+ const prefix string = ",\"bearerToken\":"
out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.BearerToken)
}
out.RawByte('}')
}
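The generated marshalers drop the runtime `first` flag in favor of a compile-time trick: every field prefix constant begins with a comma, and the first field slices it off with `prefix[1:]`. A tiny illustration of why that still emits valid JSON:

```go
package main

import "fmt"

func main() {
	const first = ",\"containerId\":"
	const next = ",\"treeId\":"
	// The leading comma is stripped only for the first field.
	fmt.Print("{", first[1:], "\"...\"", next, "\"...\"", "}")
	// Output: {"containerId":"...","treeId":"..."}
}
```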
@@ -281,13 +245,7 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -299,15 +257,7 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.ParentId = f
}
case "meta":
@@ -327,13 +277,7 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.BearerToken = f
}
}
@@ -469,25 +413,14 @@ func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -622,19 +555,11 @@ func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.NodeId)
}
out.RawByte('}')
}
@@ -667,15 +592,7 @@ func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "nodeId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.NodeId = f
}
}
@@ -811,25 +728,14 @@ func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1070,49 +976,24 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"pathAttribute\":"
+ const prefix string = ",\"pathAttribute\":"
out.RawString(prefix)
out.String(x.PathAttribute)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"path\":"
+ const prefix string = ",\"path\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Path {
@@ -1124,12 +1005,7 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
+ const prefix string = ",\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -1141,18 +1017,9 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
+ const prefix string = ",\"bearerToken\":"
out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.BearerToken)
}
out.RawByte('}')
}
@@ -1185,13 +1052,7 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -1236,13 +1097,7 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.BearerToken = f
}
}
@@ -1378,25 +1233,14 @@ func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1553,38 +1397,23 @@ func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodes\":"
- out.RawString(prefix)
+ const prefix string = ",\"nodes\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Nodes {
if i != 0 {
out.RawByte(',')
}
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Nodes[i], 10)
- out.RawByte('"')
+ out.Uint64(x.Nodes[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
+ const prefix string = ",\"parentId\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
+ out.Uint64(x.ParentId)
}
out.RawByte('}')
}
@@ -1620,15 +1449,7 @@ func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
list = append(list, f)
in.WantComma()
}
@@ -1638,15 +1459,7 @@ func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.ParentId = f
}
}
@@ -1782,25 +1595,14 @@ func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -1995,57 +1797,26 @@ func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
+ const prefix string = ",\"nodeId\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
+ out.Uint64(x.NodeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
+ const prefix string = ",\"bearerToken\":"
out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.BearerToken)
}
out.RawByte('}')
}
@@ -2078,13 +1849,7 @@ func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -2096,27 +1861,13 @@ func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "nodeId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.NodeId = f
}
case "bearerToken":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.BearerToken = f
}
}
@@ -2252,25 +2003,14 @@ func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2544,25 +2284,14 @@ func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -2803,63 +2532,29 @@ func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
+ const prefix string = ",\"parentId\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
+ out.Uint64(x.ParentId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
+ const prefix string = ",\"nodeId\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
+ out.Uint64(x.NodeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
+ const prefix string = ",\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -2871,18 +2566,9 @@ func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
+ const prefix string = ",\"bearerToken\":"
out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.BearerToken)
}
out.RawByte('}')
}
@@ -2915,13 +2601,7 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -2933,29 +2613,13 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.ParentId = f
}
case "nodeId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.NodeId = f
}
case "meta":
@@ -2975,13 +2639,7 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.BearerToken = f
}
}
@@ -3117,25 +2775,14 @@ func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3409,25 +3056,14 @@ func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -3702,49 +3338,24 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"pathAttribute\":"
+ const prefix string = ",\"pathAttribute\":"
out.RawString(prefix)
out.String(x.PathAttribute)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"path\":"
+ const prefix string = ",\"path\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Path {
@@ -3756,12 +3367,7 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"attributes\":"
+ const prefix string = ",\"attributes\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Attributes {
@@ -3773,38 +3379,19 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"latestOnly\":"
+ const prefix string = ",\"latestOnly\":"
out.RawString(prefix)
out.Bool(x.LatestOnly)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"allAttributes\":"
+ const prefix string = ",\"allAttributes\":"
out.RawString(prefix)
out.Bool(x.AllAttributes)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
+ const prefix string = ",\"bearerToken\":"
out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.BearerToken)
}
out.RawByte('}')
}
@@ -3837,13 +3424,7 @@ func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -3899,13 +3480,7 @@ func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "bearerToken":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.BearerToken = f
}
}
@@ -4041,25 +3616,14 @@ func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4260,39 +3824,19 @@ func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.NodeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"timestamp\":"
+ const prefix string = ",\"timestamp\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp, 10)
- out.RawByte('"')
+ out.Uint64(x.Timestamp)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
+ const prefix string = ",\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -4304,16 +3848,9 @@ func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) {
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
+ const prefix string = ",\"parentId\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
+ out.Uint64(x.ParentId)
}
out.RawByte('}')
}
@@ -4346,29 +3883,13 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "nodeId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.NodeId = f
}
case "timestamp":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.Timestamp = f
}
case "meta":
@@ -4388,15 +3909,7 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentId":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.ParentId = f
}
}
@@ -4494,16 +4007,10 @@ func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodes\":"
- out.RawString(prefix)
+ const prefix string = ",\"nodes\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Nodes {
if i != 0 {
@@ -4688,25 +4195,14 @@ func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -4873,22 +4369,11 @@ func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"direction\":"
- out.RawString(prefix)
- v := int32(x.Direction)
- if vv, ok := GetSubTreeRequest_Body_Order_Direction_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
+ const prefix string = ",\"direction\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Direction))
}
out.RawByte('}')
}
@@ -5134,82 +4619,41 @@ func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
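+ // Base64Bytes emits JSON null for a nil slice, so no explicit nil guard
+ // is needed here.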
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"rootId\":"
+ const prefix string = ",\"rootId\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.RootId {
if i != 0 {
out.RawByte(',')
}
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.RootId[i], 10)
- out.RawByte('"')
+ out.Uint64(x.RootId[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"depth\":"
+ const prefix string = ",\"depth\":"
out.RawString(prefix)
out.Uint32(x.Depth)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
+ const prefix string = ",\"bearerToken\":"
out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.BearerToken)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"orderBy\":"
+ const prefix string = ",\"orderBy\":"
out.RawString(prefix)
x.OrderBy.MarshalEasyJSON(out)
}
@@ -5244,13 +4688,7 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -5265,15 +4703,7 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
list = append(list, f)
in.WantComma()
}
@@ -5283,27 +4713,13 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "depth":
{
var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
+ f = in.Uint32()
x.Depth = f
}
case "bearerToken":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.BearerToken = f
}
case "orderBy":
@@ -5446,25 +4862,14 @@ func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -5671,72 +5076,45 @@ func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.NodeId {
if i != 0 {
out.RawByte(',')
}
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId[i], 10)
- out.RawByte('"')
+ out.Uint64(x.NodeId[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
+ const prefix string = ",\"parentId\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.ParentId {
if i != 0 {
out.RawByte(',')
}
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId[i], 10)
- out.RawByte('"')
+ out.Uint64(x.ParentId[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"timestamp\":"
+ const prefix string = ",\"timestamp\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Timestamp {
if i != 0 {
out.RawByte(',')
}
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp[i], 10)
- out.RawByte('"')
+ out.Uint64(x.Timestamp[i])
}
out.RawByte(']')
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
+ const prefix string = ",\"meta\":"
out.RawString(prefix)
out.RawByte('[')
for i := range x.Meta {
@@ -5781,15 +5159,7 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
list = append(list, f)
in.WantComma()
}
@@ -5802,15 +5172,7 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
list = append(list, f)
in.WantComma()
}
@@ -5823,15 +5185,7 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
var list []uint64
in.Delim('[')
for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
list = append(list, f)
in.WantComma()
}
@@ -5985,25 +5339,14 @@ func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6138,21 +5481,11 @@ func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
out.RawByte('}')
}
@@ -6185,13 +5518,7 @@ func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
}
@@ -6327,25 +5654,14 @@ func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6480,16 +5796,10 @@ func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ids\":"
- out.RawString(prefix)
+ const prefix string = ",\"ids\":"
+ out.RawString(prefix[1:])
out.RawByte('[')
for i := range x.Ids {
if i != 0 {
@@ -6673,25 +5983,14 @@ func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -6869,39 +6168,19 @@ func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"operation\":"
+ const prefix string = ",\"operation\":"
out.RawString(prefix)
x.Operation.MarshalEasyJSON(out)
}
@@ -6936,13 +6215,7 @@ func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -7091,25 +6364,14 @@ func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7383,25 +6645,14 @@ func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -7596,55 +6847,26 @@ func (x *GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
+ const prefix string = ",\"treeId\":"
out.RawString(prefix)
out.String(x.TreeId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"height\":"
+ const prefix string = ",\"height\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
- out.RawByte('"')
+ out.Uint64(x.Height)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"count\":"
+ const prefix string = ",\"count\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Count, 10)
- out.RawByte('"')
+ out.Uint64(x.Count)
}
out.RawByte('}')
}
@@ -7677,13 +6899,7 @@ func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "containerId":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.ContainerId = f
}
case "treeId":
@@ -7695,29 +6911,13 @@ func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "height":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.Height = f
}
case "count":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.Count = f
}
}
@@ -7853,25 +7053,14 @@ func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -8009,16 +7198,10 @@ func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"operation\":"
- out.RawString(prefix)
+ const prefix string = ",\"operation\":"
+ out.RawString(prefix[1:])
x.Operation.MarshalEasyJSON(out)
}
out.RawByte('}')
@@ -8189,25 +7372,14 @@ func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -8481,25 +7653,14 @@ func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
@@ -8773,25 +7934,14 @@ func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
x.Body.MarshalEasyJSON(out)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
x.Signature.MarshalEasyJSON(out)
}
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 8221a4546..58cab659f 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -8,17 +8,19 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "go.uber.org/zap"
)
type message interface {
@@ -28,7 +30,16 @@ type message interface {
SetSignature(*Signature)
}
+func basicACLErr(op acl.Op) error {
+ return fmt.Errorf("access to operation %s is denied by basic ACL check", op)
+}
+
+func eACLErr(op eacl.Operation, err error) error {
+ return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
+}
+
var (
+ errBearerWrongOwner = errors.New("bearer token must be signed by the container owner")
errBearerWrongContainer = errors.New("bearer token is created for another container")
errBearerSignature = errors.New("invalid bearer token signature")
)
@@ -38,7 +49,7 @@ var (
// Operation must be one of:
// - 1. ObjectPut;
// - 2. ObjectGet.
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error {
+func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error {
err := verifyMessage(req)
if err != nil {
return err
@@ -49,14 +60,16 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return err
}
- cnr, err := s.cnrSource.Get(ctx, cid)
+ cnr, err := s.cnrSource.Get(cid)
if err != nil {
return fmt.Errorf("can't get container %s: %w", cid, err)
}
- bt, err := parseBearer(rawBearer, cid)
+ eaclOp := eACLOp(op)
+
+ bt, err := parseBearer(rawBearer, cid, eaclOp)
if err != nil {
- return fmt.Errorf("access to operation %s is denied: %w", op, err)
+ return err
}
role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt)
@@ -64,22 +77,56 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get request role: %w", err)
}
- if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil {
- return apeErr(err)
+ basicACL := cnr.Value.BasicACL()
+ // The basic ACL mask can be unset if container operations are performed
+ // with strict APE checks only.
+ //
+ // FIXME(@aarifullin): the tree service temporarily performs APE checks on
+ // object verbs, because tree verbs have not been introduced yet.
+ if basicACL == 0x0 {
+ return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
}
- return nil
-}
-func apeErr(err error) error {
- var chRouterErr *checkercore.ChainRouterError
- if !errors.As(err, &chRouterErr) {
- errServerInternal := &apistatus.ServerInternal{}
- apistatus.WriteInternalServerErr(errServerInternal, err)
- return errServerInternal
+ if !basicACL.IsOpAllowed(op, role) {
+ return basicACLErr(op)
}
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(err.Error())
- return errAccessDenied
+
+ if !basicACL.Extendable() {
+ return nil
+ }
+
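+ // Choose the eACL table: a usable, non-impersonating bearer token supplies
+ // its own table (and must be issued by the container owner); otherwise the
+ // container's stored eACL applies, with an impersonating token substituting
+ // its signing key as the request signer.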
+ var useBearer bool
+ if len(rawBearer) != 0 {
+ if !basicACL.AllowedBearerRules(op) {
+ s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL,
+ zap.String("cid", cid.EncodeToString()),
+ zap.Stringer("op", op),
+ )
+ } else {
+ useBearer = true
+ }
+ }
+
+ var tb eacl.Table
+ signer := req.GetSignature().GetKey()
+ if useBearer && !bt.Impersonate() {
+ if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) {
+ return eACLErr(eaclOp, errBearerWrongOwner)
+ }
+ tb = bt.EACLTable()
+ } else {
+ tbCore, err := s.eaclSource.GetEACL(cid)
+ if err != nil {
+ return handleGetEACLError(err)
+ }
+ tb = *tbCore.Value
+
+ if useBearer && bt.Impersonate() {
+ signer = bt.SigningKeyBytes()
+ }
+ }
+
+ return checkEACL(tb, signer, eACLRole(role), eaclOp)
}
// Returns true iff the operation is read-only and request was signed
@@ -95,32 +142,40 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
}
key := sign.GetKey()
- for _, currentKey := range *s.authorizedKeys.Load() {
- if bytes.Equal(currentKey, key) {
+ for i := range s.authorizedKeys {
+ if bytes.Equal(s.authorizedKeys[i], key) {
return true, nil
}
}
return false, nil
}
-func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) {
+func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*bearer.Token, error) {
if len(rawBearer) == 0 {
return nil, nil
}
bt := new(bearer.Token)
if err := bt.Unmarshal(rawBearer); err != nil {
- return nil, fmt.Errorf("invalid bearer token: %w", err)
+ return nil, eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err))
}
if !bt.AssertContainer(cid) {
- return nil, errBearerWrongContainer
+ return nil, eACLErr(eaclOp, errBearerWrongContainer)
}
if !bt.VerifySignature() {
- return nil, errBearerSignature
+ return nil, eACLErr(eaclOp, errBearerSignature)
}
return bt, nil
}
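+// handleGetEACLError treats a missing eACL table as the absence of extra
+// restrictions rather than as a failure.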
+func handleGetEACLError(err error) error {
+ if client.IsErrEACLNotFound(err) {
+ return nil
+ }
+
+ return fmt.Errorf("get eACL table: %w", err)
+}
+
func verifyMessage(m message) error {
binBody, err := m.ReadSignedData(nil)
if err != nil {
@@ -194,3 +249,84 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a
return role, pub, nil
}
+
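+// eACLOp maps a basic ACL operation to its eACL counterpart. The tree service
+// only ever passes ObjectGet and ObjectPut here, so the panic is unreachable
+// in practice.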
+func eACLOp(op acl.Op) eacl.Operation {
+ switch op {
+ case acl.OpObjectGet:
+ return eacl.OperationGet
+ case acl.OpObjectPut:
+ return eacl.OperationPut
+ default:
+ panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
+ }
+}
+
+func eACLRole(role acl.Role) eacl.Role {
+ switch role {
+ case acl.RoleOwner:
+ return eacl.RoleUser
+ case acl.RoleOthers:
+ return eacl.RoleOthers
+ default:
+ panic(fmt.Sprintf("unexpected tree service ACL role: %s", role))
+ }
+}
+
+var (
+ errDENY = errors.New("DENY eACL rule")
+ errNoAllowRules = errors.New("not found allowing rules for the request")
+)
+
+// checkEACL searches for the eACL rules that could be applied to the request
+// (a tuple of a signer key, its FrostFS role and a request operation).
+// It does not apply the eACL table filters, since tree requests carry no
+// "object" information that could be filtered; applying them would therefore
+// produce unexpected results.
+// The code was copied with minor updates from the SDK repo:
+// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28.
+func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error {
+ for _, record := range tb.Records() {
+ // check type of operation
+ if record.Operation() != op {
+ continue
+ }
+
+ // check target
+ if !targetMatches(record, role, signer) {
+ continue
+ }
+
+ switch a := record.Action(); a {
+ case eacl.ActionAllow:
+ return nil
+ case eacl.ActionDeny:
+ return eACLErr(op, errDENY)
+ default:
+ return eACLErr(op, fmt.Errorf("unexpected action: %s", a))
+ }
+ }
+
+ return eACLErr(op, errNoAllowRules)
+}
+
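+// targetMatches reports whether an eACL record applies to the request signer:
+// an explicit key list in a target takes precedence over the role comparison.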
+func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool {
+ for _, target := range rec.Targets() {
+ // check public key match
+ if pubs := target.BinaryKeys(); len(pubs) != 0 {
+ for _, key := range pubs {
+ if bytes.Equal(key, signer) {
+ return true
+ }
+ }
+
+ continue
+ }
+
+ // check target group match
+ if role == target.Role() {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index 8815c227f..3c3ebfe89 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -4,70 +4,32 @@ import (
"context"
"crypto/ecdsa"
"crypto/sha256"
- "encoding/hex"
"errors"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- aclV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
- "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
)
-const versionTreeID = "version"
-
type dummyNetmapSource struct {
netmap.Source
}
-type dummySubjectProvider struct {
- subjects map[util.Uint160]client.SubjectExtended
-}
-
-func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
- res := s.subjects[addr]
- return &client.Subject{
- PrimaryKey: res.PrimaryKey,
- AdditionalKeys: res.AdditionalKeys,
- Namespace: res.Namespace,
- Name: res.Name,
- KV: res.KV,
- }, nil
-}
-
-func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
- res := s.subjects[addr]
- return &res, nil
-}
-
-type dummyEpochSource struct {
- epoch uint64
-}
-
-func (s dummyEpochSource) CurrentEpoch() uint64 {
- return s.epoch
-}
-
type dummyContainerSource map[string]*containercore.Container
-func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
+func (s dummyContainerSource) List() ([]cid.ID, error) {
res := make([]cid.ID, 0, len(s))
var cnr cid.ID
@@ -83,7 +45,7 @@ func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
return res, nil
}
-func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) {
+func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
cnt, ok := s[id.String()]
if !ok {
return nil, errors.New("container not found")
@@ -91,10 +53,20 @@ func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercor
return cnt, nil
}
-func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) {
+func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) {
return &containercore.DelInfo{}, nil
}
+type dummyEACLSource map[string]*containercore.EACL
+
+func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) {
+ cntEACL, ok := s[id.String()]
+ if !ok {
+ return nil, errors.New("container not found")
+ }
+ return cntEACL, nil
+}
+
func testContainer(owner user.ID) container.Container {
var r netmapSDK.ReplicaDescriptor
r.SetNumberOfObjects(1)
@@ -109,8 +81,6 @@ func testContainer(owner user.ID) container.Container {
return cnt
}
-const currentEpoch = 123
-
func TestMessageSign(t *testing.T) {
privs := make([]*keys.PrivateKey, 4)
for i := range privs {
@@ -129,15 +99,6 @@ func TestMessageSign(t *testing.T) {
Value: testContainer(ownerID),
}
- e := inmemory.NewInMemoryLocalOverrides()
- e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{
- Type: engine.Container,
- Name: cid1.EncodeToString(),
- }, testChain(privs[0].PublicKey(), privs[1].PublicKey()))
- frostfsidProvider := dummySubjectProvider{
- subjects: make(map[util.Uint160]client.SubjectExtended),
- }
-
s := &Service{
cfg: cfg{
log: test.NewLogger(t),
@@ -146,13 +107,14 @@ func TestMessageSign(t *testing.T) {
cnrSource: dummyContainerSource{
cid1.String(): cnr,
},
- frostfsidSubjectProvider: frostfsidProvider,
- state: dummyEpochSource{epoch: currentEpoch},
+ eaclSource: dummyEACLSource{
+ cid1.String(): &containercore.EACL{
+ Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()),
+ },
+ },
},
- apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
- s.cfg.authorizedKeys.Store(&[][]byte{})
rawCID1 := make([]byte, sha256.Size)
cid1.Encode(rawCID1)
@@ -171,26 +133,26 @@ func TestMessageSign(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRW)
t.Run("missing signature, no panic", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
})
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op))
t.Run("invalid CID", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
})
cnr.Value.SetBasicACL(acl.Private)
t.Run("extension disabled", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op))
})
t.Run("invalid key", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op))
})
t.Run("bearer", func(t *testing.T) {
@@ -203,7 +165,7 @@ func TestMessageSign(t *testing.T) {
t.Run("invalid bearer", func(t *testing.T) {
req.Body.BearerToken = []byte{0xFF}
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer CID", func(t *testing.T) {
@@ -212,7 +174,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer owner", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -220,7 +182,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer signature", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -232,112 +194,20 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bv2.StableMarshal(nil)
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- })
-
- t.Run("omit override within bt", func(t *testing.T) {
- t.Run("personated", func(t *testing.T) {
- bt := testBearerTokenNoOverride()
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override")
- })
-
- t.Run("impersonated", func(t *testing.T) {
- bt := testBearerTokenNoOverride()
- bt.SetImpersonate(true)
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- })
- })
-
- t.Run("invalid override within bearer token", func(t *testing.T) {
- t.Run("personated", func(t *testing.T) {
- bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
- })
-
- t.Run("impersonated", func(t *testing.T) {
- bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
- bt.SetImpersonate(true)
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
- })
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("impersonate", func(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRWExtended)
var bt bearer.Token
- bt.SetExp(10)
- bt.SetImpersonate(true)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
- })
-
- t.Run("impersonate, but target user is still set", func(t *testing.T) {
- var bt bearer.Token
- bt.SetExp(10)
bt.SetImpersonate(true)
- var reqSigner user.ID
- user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey()))
-
- bt.ForUser(reqSigner)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
- })
-
- t.Run("impersonate but invalid signer", func(t *testing.T) {
- var bt bearer.Token
- bt.SetExp(10)
- bt.SetImpersonate(true)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
require.NoError(t, bt.Sign(privs[1].PrivateKey))
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -347,95 +217,64 @@ func TestMessageSign(t *testing.T) {
t.Run("put and get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("only get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[2].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("none", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[3].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
})
}
func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token {
var b bearer.Token
- b.SetExp(currentEpoch + 1)
- b.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid.EncodeToString(),
- },
- Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
- })
+ b.SetEACLTable(*testTable(cid, forPutGet, forGet))
return b
}
-func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token {
- var b bearer.Token
- b.SetExp(currentEpoch + 1)
- b.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- },
- Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
- })
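+// testTable builds an eACL table that allows GET for both keys, allows PUT
+// only for forPutGet, and then denies both operations for RoleOthers, so the
+// allow records must match before the deny records are reached.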
+func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table {
+ tgtGet := eaclSDK.NewTarget()
+ tgtGet.SetRole(eaclSDK.RoleUnknown)
+ tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()})
- return b
-}
+ rGet := eaclSDK.NewRecord()
+ rGet.SetAction(eaclSDK.ActionAllow)
+ rGet.SetOperation(eaclSDK.OperationGet)
+ rGet.SetTargets(*tgtGet)
-func testBearerTokenNoOverride() bearer.Token {
- var b bearer.Token
- b.SetExp(currentEpoch + 1)
- return b
-}
+ tgtPut := eaclSDK.NewTarget()
+ tgtPut.SetRole(eaclSDK.RoleUnknown)
+ tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()})
-func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
- ruleGet := chain.Rule{
- Status: chain.Allow,
- Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
- Actions: chain.Actions{Names: []string{native.MethodGetObject}},
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: native.PropertyKeyActorPublicKey,
- Value: hex.EncodeToString(forPutGet.Bytes()),
- },
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: native.PropertyKeyActorPublicKey,
- Value: hex.EncodeToString(forGet.Bytes()),
- },
- },
- }
- rulePut := chain.Rule{
- Status: chain.Allow,
- Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
- Actions: chain.Actions{Names: []string{native.MethodPutObject}},
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: native.PropertyKeyActorPublicKey,
- Value: hex.EncodeToString(forPutGet.Bytes()),
- },
- },
+ rPut := eaclSDK.NewRecord()
+ rPut.SetAction(eaclSDK.ActionAllow)
+ rPut.SetOperation(eaclSDK.OperationPut)
+ rPut.SetTargets(*tgtPut)
+
+ tb := eaclSDK.NewTable()
+ tb.AddRecord(rGet)
+ tb.AddRecord(rPut)
+
+ tgt := eaclSDK.NewTarget()
+ tgt.SetRole(eaclSDK.RoleOthers)
+
+ for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} {
+ r := eaclSDK.NewRecord()
+ r.SetAction(eaclSDK.ActionDeny)
+ r.SetTargets(*tgt)
+ r.SetOperation(op)
+ tb.AddRecord(r)
}
- return &chain.Chain{
- Rules: []chain.Rule{
- ruleGet,
- rulePut,
- },
- }
+ tb.SetCID(cid)
+
+ return tb
}
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index af355639f..5bbc93978 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -2,9 +2,7 @@ package tree
import (
"context"
- "crypto/ecdsa"
"crypto/sha256"
- "crypto/tls"
"errors"
"fmt"
"io"
@@ -15,8 +13,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -24,15 +20,12 @@ import (
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -46,7 +39,7 @@ const defaultSyncWorkerCount = 20
// tree IDs from the other container nodes. Returns ErrNotInContainer if the node
// is not included in the container.
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
- nodes, pos, err := s.getContainerNodes(ctx, cid)
+ nodes, pos, err := s.getContainerNodes(cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -78,8 +71,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
var treesToSync []string
var outErr error
- err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool {
- resp, outErr = c.TreeList(fCtx, req)
+ err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(ctx, req)
if outErr != nil {
return false
}
@@ -99,7 +92,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
for _, tid := range treesToSync {
h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
+ s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
continue
@@ -107,7 +100,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
- s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
+ s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
}
@@ -119,7 +112,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
// SynchronizeTree tries to synchronize log starting from the last stored height.
func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error {
- nodes, pos, err := s.getContainerNodes(ctx, cid)
+ nodes, pos, err := s.getContainerNodes(cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -138,9 +131,14 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
}
// mergeOperationStreams performs merge sort for node operation streams to one stream.
-func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
+func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
defer close(merged)
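+ // Prime the merge with the head element of every stream; a stream that is
+ // already closed contributes nil and is skipped below.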
+ ms := make([]*pilorama.Move, len(streams))
+ for i := range streams {
+ ms[i] = <-streams[i]
+ }
+
// Merging different node streams shuffles incoming operations like that:
//
// x - operation from the stream A
@@ -152,15 +150,6 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
// operation height from the stream B. This height is stored in minStreamedLastHeight.
var minStreamedLastHeight uint64 = math.MaxUint64
- ms := make([]*pilorama.Move, len(streams))
- for i := range streams {
- select {
- case ms[i] = <-streams[i]:
- case <-ctx.Done():
- return minStreamedLastHeight
- }
- }
-
for {
var minTimeMoveTime uint64 = math.MaxUint64
minTimeMoveIndex := -1
@@ -175,11 +164,7 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
break
}
- select {
- case merged <- ms[minTimeMoveIndex]:
- case <-ctx.Done():
- return minStreamedLastHeight
- }
+ merged <- ms[minTimeMoveIndex]
height := ms[minTimeMoveIndex].Time
if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
minStreamedLastHeight = min(minStreamedLastHeight, height)
@@ -191,30 +176,38 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string,
operationStream <-chan *pilorama.Move,
-) (uint64, error) {
+) uint64 {
+ errGroup, _ := errgroup.WithContext(ctx)
+ const workersCount = 1024
+ errGroup.SetLimit(workersCount)
+
+ // We run TreeApply concurrently for the operation batch. Let's consider two operations
+ // in the batch m1 and m2 such that m1.Time < m2.Time. The engine may apply m2 and fail
+ // on m1. That means the service must start sync from m1.Time in the next iteration;
+ // this height is stored in unappliedOperationHeight.
+ var unappliedOperationHeight uint64 = math.MaxUint64
+ var heightMtx sync.Mutex
+
var prev *pilorama.Move
- var batch []*pilorama.Move
for m := range operationStream {
// skip already applied op
if prev != nil && prev.Time == m.Time {
continue
}
prev = m
- batch = append(batch, m)
- if len(batch) == s.syncBatchSize {
- if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
- return batch[0].Time, err
+ errGroup.Go(func() error {
+ if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
+ heightMtx.Lock()
+ unappliedOperationHeight = min(unappliedOperationHeight, m.Time)
+ heightMtx.Unlock()
+ return err
}
- batch = batch[:0]
- }
+ return nil
+ })
}
- if len(batch) > 0 {
- if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
- return batch[0].Time, err
- }
- }
- return math.MaxUint64, nil
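+ // The group error is deliberately dropped: any failure is already encoded
+ // in unappliedOperationHeight, which the caller uses as the restart point.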
+ _ = errGroup.Wait()
+ return unappliedOperationHeight
}
func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
@@ -247,14 +240,10 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
Parent: lm.GetParentId(),
Child: lm.GetChildId(),
}
- if err := m.FromBytes(lm.GetMeta()); err != nil {
+ if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
return err
}
- select {
- case opsCh <- m:
- case <-ctx.Done():
- return ctx.Err()
- }
+ opsCh <- m
}
if !errors.Is(err, io.EOF) {
return err
@@ -270,7 +259,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
treeID string, nodes []netmapSDK.NodeInfo,
) uint64 {
- s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
+ s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
errGroup, egCtx := errgroup.WithContext(ctx)
const workersCount = 1024
@@ -283,14 +272,13 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
merged := make(chan *pilorama.Move)
var minStreamedLastHeight uint64
errGroup.Go(func() error {
- minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged)
+ minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged)
return nil
})
var minUnappliedHeight uint64
errGroup.Go(func() error {
- var err error
- minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged)
- return err
+ minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged)
+ return nil
})
var allNodesSynced atomic.Bool
@@ -299,27 +287,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
for i, n := range nodes {
errGroup.Go(func() error {
var nodeSynced bool
- for addr := range n.NetworkEndpoints() {
+ n.IterateNetworkEndpoints(func(addr string) bool {
var a network.Address
if err := a.FromString(addr); err != nil {
- s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- continue
+ s.log.Warn(logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ return false
}
- cc, err := dialTreeService(ctx, a, s.key, s.ds)
+ cc, err := s.createConnection(a)
if err != nil {
- s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- continue
+ s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ return false
}
+ defer cc.Close()
err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
if err != nil {
- s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
+ s.log.Warn(logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
}
nodeSynced = err == nil
- _ = cc.Close()
- break
- }
+ return true
+ })
close(nodeOperationStreams[i])
if !nodeSynced {
allNodesSynced.Store(false)
@@ -329,7 +317,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
}
if err := errGroup.Wait(); err != nil {
allNodesSynced.Store(false)
- s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
+ s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
}
newHeight := minStreamedLastHeight
@@ -344,60 +332,17 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return from
}
-func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) {
- cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer()))
- if err != nil {
- return nil, err
- }
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- defer cancel()
-
- req := &HealthcheckRequest{
- Body: &HealthcheckRequest_Body{},
- }
- if err := SignMessage(req, key); err != nil {
- return nil, err
- }
-
- // perform some request to check connection
- if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
- _ = cc.Close()
- return nil, err
- }
- return cc, nil
-}
-
-func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
- host, isTLS, err := client.ParseURI(a.URIAddr())
- if err != nil {
- return nil, err
- }
-
- creds := insecure.NewCredentials()
- if isTLS {
- creds = credentials.NewTLS(&tls.Config{})
- }
-
- defaultOpts := []grpc.DialOption{
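+// createConnection opens a plaintext gRPC client connection to a tree peer.
+// grpc.NewClient is lazy, so dial failures surface on the first RPC.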
+func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
+ return grpc.NewClient(a.URIAddr(),
grpc.WithChainUnaryInterceptor(
- qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
- tracing_grpc.NewUnaryClientInterceptor(),
- tagging.NewUnaryClientInterceptor(),
+ tracing_grpc.NewUnaryClientInteceptor(),
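+ // NB: "Inteceptor" matches the exported name in this version of
+ // frostfs-observability; renaming it here would break the build.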
),
grpc.WithChainStreamInterceptor(
- qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
metrics.NewStreamClientInterceptor(),
tracing_grpc.NewStreamClientInterceptor(),
- tagging.NewStreamClientInterceptor(),
),
- grpc.WithTransportCredentials(creds),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- grpc.WithDisableServiceConfig(),
- }
-
- return grpc.NewClient(host, append(defaultOpts, opts...)...)
+ grpc.WithTransportCredentials(insecure.NewCredentials()))
}
// ErrAlreadySyncing is returned when a service synchronization has already
@@ -437,25 +382,25 @@ func (s *Service) syncLoop(ctx context.Context) {
return
case <-s.syncChan:
ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
- s.log.Info(ctx, logs.TreeSyncingTrees)
+ s.log.Debug(logs.TreeSyncingTrees)
start := time.Now()
- cnrs, err := s.cnrSource.List(ctx)
+ cnrs, err := s.cfg.cnrSource.List()
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
+ s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
s.metrics.AddSyncDuration(time.Since(start), false)
span.End()
break
}
- newMap, cnrsToSync := s.containersToSync(ctx, cnrs)
+ newMap, cnrsToSync := s.containersToSync(cnrs)
s.syncContainers(ctx, cnrsToSync)
s.removeContainers(ctx, newMap)
- s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)
+ s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
s.metrics.AddSyncDuration(time.Since(start), true)
span.End()
@@ -475,19 +420,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
err := s.syncPool.Submit(func() {
defer wg.Done()
- s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
err := s.synchronizeAllTrees(ctx, cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
+ s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
return
}
- s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
})
if err != nil {
wg.Done()
- s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
+ s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
zap.Stringer("cid", cnr),
zap.Error(err))
if errors.Is(err, ants.ErrPoolClosed) {
@@ -511,9 +456,9 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
continue
}
- existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr)
+ existed, err := containerCore.WasRemoved(s.cnrSource, cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
+ s.log.Error(logs.TreeCouldNotCheckIfContainerExisted,
zap.Stringer("cid", cnr),
zap.Error(err))
} else if existed {
@@ -525,25 +470,25 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
}
for _, cnr := range removed {
- s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
err := s.DropTree(ctx, cnr, "")
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
+ s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
zap.Stringer("cid", cnr),
zap.Error(err))
}
}
}
-func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
+func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
cnrsToSync := make([]cid.ID, 0, len(cnrs))
for _, cnr := range cnrs {
- _, pos, err := s.getContainerNodes(ctx, cnr)
+ _, pos, err := s.getContainerNodes(cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes,
+ s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
zap.Stringer("cid", cnr),
zap.Error(err))
continue
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 87d419408..497d90554 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -1,7 +1,6 @@
package tree
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -65,7 +64,7 @@ func Test_mergeOperationStreams(t *testing.T) {
merged := make(chan *pilorama.Move, 1)
min := make(chan uint64)
go func() {
- min <- mergeOperationStreams(context.Background(), nodeOpChans, merged)
+ min <- mergeOperationStreams(nodeOpChans, merged)
}()
var res []uint64
diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go
index 2827b10a9..4399f8a8b 100644
--- a/pkg/services/tree/types_frostfs.pb.go
+++ b/pkg/services/tree/types_frostfs.pb.go
@@ -5,13 +5,12 @@ package tree
import (
json "encoding/json"
fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
easyproto "github.com/VictoriaMetrics/easyproto"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
- strconv "strconv"
)
type KeyValue struct {
@@ -114,31 +113,16 @@ func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
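+ // prefix carries a leading comma as the JSON field separator; the first
+ // field strips it via prefix[1:], later fields emit it as written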
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
out.String(x.Key)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"value\":"
+ const prefix string = ",\"value\":"
out.RawString(prefix)
- if x.Value != nil {
- out.Base64Bytes(x.Value)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Value)
}
out.RawByte('}')
}
@@ -177,13 +161,7 @@ func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "value":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Value = f
}
}
@@ -315,45 +293,21 @@ func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentID\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
+ const prefix string = ",\"parentID\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.ParentId)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
+ const prefix string = ",\"meta\":"
out.RawString(prefix)
- if x.Meta != nil {
- out.Base64Bytes(x.Meta)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Meta)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"childID\":"
+ const prefix string = ",\"childID\":"
out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ChildId, 10)
- out.RawByte('"')
+ out.Uint64(x.ChildId)
}
out.RawByte('}')
}
@@ -386,41 +340,19 @@ func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "parentID":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
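+ // the field is now marshalled as a bare JSON number, so it can be read
+ // directly instead of parsing a quoted string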
+ f = in.Uint64()
x.ParentId = f
}
case "meta":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Meta = f
}
case "childID":
{
var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
+ f = in.Uint64()
x.ChildId = f
}
}
@@ -532,35 +464,16 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString("null")
return
}
- first := true
out.RawByte('{')
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
}
{
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
+ const prefix string = ",\"signature\":"
out.RawString(prefix)
- if x.Sign != nil {
- out.Base64Bytes(x.Sign)
- } else {
- out.String("")
- }
+ out.Base64Bytes(x.Sign)
}
out.RawByte('}')
}
@@ -593,25 +506,13 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
case "key":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Key = f
}
case "signature":
{
var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
+ f = in.Bytes()
x.Sign = f
}
}
diff --git a/pkg/services/util/response/service.go b/pkg/services/util/response/service.go
index 5152a8ece..005a643e5 100644
--- a/pkg/services/util/response/service.go
+++ b/pkg/services/util/response/service.go
@@ -1,10 +1,10 @@
package response
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index 348a45a94..bce43d6e8 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go
index 66581878a..547c8d50b 100644
--- a/pkg/util/attributes/parser_test.go
+++ b/pkg/util/attributes/parser_test.go
@@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) {
mExp = mSrc
}
- for key, value := range node.Attributes() {
+ node.IterateAttributes(func(key, value string) {
v, ok := mExp[key]
require.True(t, ok)
require.Equal(t, value, v)
delete(mExp, key)
- }
+ })
require.Empty(t, mExp)
}
diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go
index 8569ec734..a9877e007 100644
--- a/pkg/util/http/calls.go
+++ b/pkg/util/http/calls.go
@@ -32,8 +32,8 @@ func (x *Server) Serve() error {
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to Serve method will have no effect.
-func (x *Server) Shutdown(ctx context.Context) error {
- ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout)
+func (x *Server) Shutdown() error {
+ ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout)
err := x.srv.Shutdown(ctx)
diff --git a/pkg/util/http/pprof.go b/pkg/util/http/pprof.go
index f85fd2ea9..7a0413000 100644
--- a/pkg/util/http/pprof.go
+++ b/pkg/util/http/pprof.go
@@ -3,14 +3,8 @@ package httputil
import (
"net/http"
"net/http/pprof"
-
- "github.com/felixge/fgprof"
)
-func init() {
- http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
-}
-
// initializes pprof package in order to
// register Prometheus handlers on http.DefaultServeMux.
var _ = pprof.Handler("")
diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go
index 2589ab786..923412a7f 100644
--- a/pkg/util/http/server.go
+++ b/pkg/util/http/server.go
@@ -76,7 +76,8 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server {
o(c)
}
- if c.shutdownTimeout <= 0 {
+ switch {
+ case c.shutdownTimeout <= 0:
panicOnOptValue("shutdown timeout", c.shutdownTimeout)
}
diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go
index 6337039a9..b2942b52a 100644
--- a/pkg/util/keyer/dashboard.go
+++ b/pkg/util/keyer/dashboard.go
@@ -6,7 +6,6 @@ import (
"os"
"text/tabwriter"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -105,7 +104,9 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) {
func base58ToHex(data string) string {
val, err := base58.Decode(data)
- assert.NoError(err, "produced incorrect base58 value")
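+ // the input is produced by this tool itself, so a failed decode is a
+ // programmer error and warrants a panic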
+ if err != nil {
+ panic("produced incorrect base58 value")
+ }
return hex.EncodeToString(val)
}
diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go
deleted file mode 100644
index 413b1d9aa..000000000
--- a/pkg/util/logger/log.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package logger
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
- qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "go.uber.org/zap"
-)
-
-func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Debug(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Info(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Warn(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Error(msg, appendContext(ctx, fields...)...)
-}
-
-func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
- if traceID := tracing.GetTraceID(ctx); traceID != "" {
- fields = append(fields, zap.String("trace_id", traceID))
- }
- if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined {
- fields = append(fields, zap.String("io_tag", ioTag))
- }
- return fields
-}
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index a1998cb1a..e67afb36b 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -2,7 +2,6 @@ package logger
import (
"fmt"
- "time"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/ssgreg/journald"
@@ -13,10 +12,8 @@ import (
// Logger represents a component
// for writing messages to log.
type Logger struct {
- z *zap.Logger
- c zapcore.Core
- t Tag
- w bool
+ *zap.Logger
+ lvl zap.AtomicLevel
}
// Prm groups Logger's parameters.
@@ -25,8 +22,16 @@ type Logger struct {
// Parameters that have been connected to the Logger support its
// configuration changing.
//
-// See also Logger.Reload, SetLevelString.
+// Passing Prm after a successful connection via NewLogger connects
+// the Prm to a new instance of the Logger.
+//
+// See also Reload, SetLevelString.
type Prm struct {
+ // link to the created Logger
+ // instance; used for a runtime
+ // reconfiguration
+ _log *Logger
+
// support runtime rereading
level zapcore.Level
@@ -35,15 +40,6 @@ type Prm struct {
// do not support runtime rereading
dest string
-
- // PrependTimestamp specifies whether to prepend a timestamp in the log
- PrependTimestamp bool
-
- // Options for zap.Logger
- Options []zap.Option
-
- // map of tag's bit masks to log level, overrides lvl
- tl map[Tag]zapcore.Level
}
const (
@@ -73,10 +69,20 @@ func (p *Prm) SetDestination(d string) error {
return nil
}
-// SetTags parses list of tags with log level.
-func (p *Prm) SetTags(tags [][]string) (err error) {
- p.tl, err = parseTags(tags)
- return err
+// Reload reloads configuration of a connected instance of the Logger.
+// Panics if no connection has been performed.
+// Returns any reconfiguration error from the Logger directly.
+func (p Prm) Reload() error {
+ if p._log == nil {
+ // incorrect logger usage
+ panic("parameters are not connected to any Logger")
+ }
+
+ return p._log.reload(p)
+}
+
+func defaultPrm() *Prm {
+ return new(Prm)
}
// NewLogger constructs a new zap logger instance. Constructing with nil
@@ -90,7 +96,10 @@ func (p *Prm) SetTags(tags [][]string) (err error) {
// - ISO8601 time encoding.
//
// Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm Prm) (*Logger, error) {
+func NewLogger(prm *Prm) (*Logger, error) {
+ if prm == nil {
+ prm = defaultPrm()
+ }
switch prm.dest {
case DestinationUndefined, DestinationStdout:
return newConsoleLogger(prm)
@@ -101,143 +110,59 @@ func NewLogger(prm Prm) (*Logger, error) {
}
}
-func newConsoleLogger(prm Prm) (*Logger, error) {
+func newConsoleLogger(prm *Prm) (*Logger, error) {
+ lvl := zap.NewAtomicLevelAt(prm.level)
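+ // the atomic level is shared between the zap config and the returned
+ // Logger so that reload() can adjust it at runtime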
+
c := zap.NewProductionConfig()
- c.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+ c.Level = lvl
c.Encoding = "console"
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
- if prm.PrependTimestamp {
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
- } else {
- c.EncoderConfig.TimeKey = ""
- }
-
- opts := []zap.Option{
+ lZap, err := c.Build(
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- zap.AddCallerSkip(1),
- }
- opts = append(opts, prm.Options...)
- lZap, err := c.Build(opts...)
+ )
if err != nil {
return nil, err
}
- l := &Logger{z: lZap, c: lZap.Core()}
- l = l.WithTag(TagMain)
+
+ l := &Logger{Logger: lZap, lvl: lvl}
+ prm._log = l
return l, nil
}
-func newJournaldLogger(prm Prm) (*Logger, error) {
+func newJournaldLogger(prm *Prm) (*Logger, error) {
+ lvl := zap.NewAtomicLevelAt(prm.level)
+
c := zap.NewProductionConfig()
+ c.Level = lvl
+ c.Encoding = "console"
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
- if prm.PrependTimestamp {
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
- } else {
- c.EncoderConfig.TimeKey = ""
- }
-
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
- core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields)
+ core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
- var samplerOpts []zapcore.SamplerOption
- if c.Sampling.Hook != nil {
- samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook))
- }
- samplingCore := zapcore.NewSamplerWithOptions(
- coreWithContext,
- time.Second,
- c.Sampling.Initial,
- c.Sampling.Thereafter,
- samplerOpts...,
- )
- opts := []zap.Option{
- zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- zap.AddCallerSkip(1),
- }
- opts = append(opts, prm.Options...)
- lZap := zap.New(samplingCore, opts...)
- l := &Logger{z: lZap, c: lZap.Core()}
- l = l.WithTag(TagMain)
+ lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
+
+ l := &Logger{Logger: lZap, lvl: lvl}
+ prm._log = l
return l, nil
}
-// With create a child logger with new fields, don't affect the parent.
-// Throws panic if tag is unset.
-func (l *Logger) With(fields ...zap.Field) *Logger {
- if l.t == 0 {
- panic("tag is unset")
- }
- c := *l
- c.z = l.z.With(fields...)
- // With called under the logger
- c.w = true
- return &c
-}
-
-type core struct {
- c zapcore.Core
- l zap.AtomicLevel
-}
-
-func (c *core) Enabled(lvl zapcore.Level) bool {
- return c.l.Enabled(lvl)
-}
-
-func (c *core) With(fields []zapcore.Field) zapcore.Core {
- clone := *c
- clone.c = clone.c.With(fields)
- return &clone
-}
-
-func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- return c.c.Check(e, ce)
-}
-
-func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error {
- return c.c.Write(e, fields)
-}
-
-func (c *core) Sync() error {
- return c.c.Sync()
-}
-
-// WithTag is an equivalent of calling [NewLogger] with the same parameters for the current logger.
-// Throws panic if provided unsupported tag.
-func (l *Logger) WithTag(tag Tag) *Logger {
- if tag == 0 || tag > Tag(len(_Tag_index)-1) {
- panic("unsupported tag " + tag.String())
- }
- if l.w {
- panic("unsupported operation for the logger's state")
- }
- c := *l
- c.t = tag
- c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
- return &core{
- c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
- l: tagToLogLevel[tag],
- }
- }))
- return &c
-}
-
-func NewLoggerWrapper(z *zap.Logger) *Logger {
- return &Logger{
- z: z.WithOptions(zap.AddCallerSkip(1)),
- t: TagMain,
- c: z.Core(),
- }
+func (l *Logger) reload(prm Prm) error {
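+ // only the logging level can be changed at runtime; destination and
+ // sampling settings are fixed when the Logger is constructed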
+ l.lvl.SetLevel(prm.level)
+ return nil
}
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
deleted file mode 100644
index b867ee6cc..000000000
--- a/pkg/util/logger/logger_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package logger
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest/observer"
-)
-
-func BenchmarkLogger(b *testing.B) {
- ctx := context.Background()
- m := map[string]Prm{}
-
- prm := Prm{}
- require.NoError(b, prm.SetLevelString("debug"))
- m["logging enabled"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("error"))
- m["logging disabled"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("error"))
- require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
- m["logging enabled via tags"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("debug"))
- require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
- m["logging disabled via tags"] = prm
-
- for k, v := range m {
- b.Run(k, func(b *testing.B) {
- logger, err := createLogger(v)
- require.NoError(b, err)
- UpdateLevelForTags(v)
- b.ResetTimer()
- b.ReportAllocs()
- for range b.N {
- logger.Info(ctx, "test info")
- }
- })
- }
-}
-
-type testCore struct {
- core zapcore.Core
-}
-
-func (c *testCore) Enabled(lvl zapcore.Level) bool {
- return c.core.Enabled(lvl)
-}
-
-func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
- c.core = c.core.With(fields)
- return c
-}
-
-func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- return ce.AddCore(e, c)
-}
-
-func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
- return nil
-}
-
-func (c *testCore) Sync() error {
- return c.core.Sync()
-}
-
-func createLogger(prm Prm) (*Logger, error) {
- prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
- tc := testCore{core: core}
- return &tc
- })}
- return NewLogger(prm)
-}
-
-func TestLoggerOutput(t *testing.T) {
- obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
-
- prm := Prm{}
- require.NoError(t, prm.SetLevelString("debug"))
- prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core {
- return obs
- })}
- loggerMain, err := NewLogger(prm)
- require.NoError(t, err)
- UpdateLevelForTags(prm)
-
- loggerMainWith := loggerMain.With(zap.String("key", "value"))
-
- require.Panics(t, func() {
- loggerMainWith.WithTag(TagShard)
- })
- loggerShard := loggerMain.WithTag(TagShard)
- loggerShard = loggerShard.With(zap.String("key1", "value1"))
-
- loggerMorph := loggerMain.WithTag(TagMorph)
- loggerMorph = loggerMorph.With(zap.String("key2", "value2"))
-
- ctx := context.Background()
- loggerMain.Debug(ctx, "main")
- loggerMainWith.Debug(ctx, "main with")
- loggerShard.Debug(ctx, "shard")
- loggerMorph.Debug(ctx, "morph")
-
- require.Len(t, logs.All(), 4)
- require.Len(t, logs.FilterFieldKey("key").All(), 1)
- require.Len(t, logs.FilterFieldKey("key1").All(), 1)
- require.Len(t, logs.FilterFieldKey("key2").All(), 1)
- require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2)
- require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1)
- require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1)
-}
diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result
deleted file mode 100644
index 612fa2967..000000000
--- a/pkg/util/logger/logger_test.result
+++ /dev/null
@@ -1,46 +0,0 @@
-goos: linux
-goarch: amd64
-pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger
-cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
-BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op
-PASS
-ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s
diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go
deleted file mode 100644
index 1b98f2e62..000000000
--- a/pkg/util/logger/tag_string.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT.
-
-package logger
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[TagMain-1]
- _ = x[TagMorph-2]
- _ = x[TagGrpcSvc-3]
- _ = x[TagIr-4]
- _ = x[TagProcessor-5]
- _ = x[TagEngine-6]
- _ = x[TagBlobovnicza-7]
- _ = x[TagBlobovniczaTree-8]
- _ = x[TagBlobstor-9]
- _ = x[TagFSTree-10]
- _ = x[TagGC-11]
- _ = x[TagShard-12]
- _ = x[TagWriteCache-13]
- _ = x[TagDeleteSvc-14]
- _ = x[TagGetSvc-15]
- _ = x[TagSearchSvc-16]
- _ = x[TagSessionSvc-17]
- _ = x[TagTreeSvc-18]
- _ = x[TagPolicer-19]
- _ = x[TagReplicator-20]
-}
-
-const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator"
-
-var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148}
-
-func (i Tag) String() string {
- i -= 1
- if i >= Tag(len(_Tag_index)-1) {
- return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")"
- }
- return _Tag_name[_Tag_index[i]:_Tag_index[i+1]]
-}
diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go
deleted file mode 100644
index a5386707e..000000000
--- a/pkg/util/logger/tags.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package logger
-
-import (
- "fmt"
- "strings"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-//go:generate stringer -type Tag -linecomment
-
-type Tag uint8
-
-const (
- _ Tag = iota //
- TagMain // main
- TagMorph // morph
- TagGrpcSvc // grpcsvc
- TagIr // ir
- TagProcessor // processor
- TagEngine // engine
- TagBlobovnicza // blobovnicza
- TagBlobovniczaTree // blobovniczatree
- TagBlobstor // blobstor
- TagFSTree // fstree
- TagGC // gc
- TagShard // shard
- TagWriteCache // writecache
- TagDeleteSvc // deletesvc
- TagGetSvc // getsvc
- TagSearchSvc // searchsvc
- TagSessionSvc // sessionsvc
- TagTreeSvc // treesvc
- TagPolicer // policer
- TagReplicator // replicator
-
- defaultLevel = zapcore.InfoLevel
-)
-
-var (
- tagToLogLevel = map[Tag]zap.AtomicLevel{}
- stringToTag = map[string]Tag{}
-)
-
-func init() {
- for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
- tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
- stringToTag[i.String()] = i
- }
-}
-
-// parseTags returns:
-// - map(always instantiated) of tag to custom log level for that tag;
-// - error if it occurred(map is empty).
-func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
- m := make(map[Tag]zapcore.Level)
- if len(raw) == 0 {
- return m, nil
- }
- for _, item := range raw {
- str, level := item[0], item[1]
- if len(level) == 0 {
- // It is not necessary to parse tags without level,
- // because default log level will be used.
- continue
- }
- var l zapcore.Level
- err := l.UnmarshalText([]byte(level))
- if err != nil {
- return nil, err
- }
- tmp := strings.Split(str, ",")
- for _, tagStr := range tmp {
- tag, ok := stringToTag[strings.TrimSpace(tagStr)]
- if !ok {
- return nil, fmt.Errorf("unsupported tag %s", str)
- }
- m[tag] = l
- }
- }
- return m, nil
-}
-
-func UpdateLevelForTags(prm Prm) {
- for k, v := range tagToLogLevel {
- nk, ok := prm.tl[k]
- if ok {
- v.SetLevel(nk)
- } else {
- v.SetLevel(prm.level)
- }
- }
-}
diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go
index b5b0a31eb..f93756d17 100644
--- a/pkg/util/logger/test/logger.go
+++ b/pkg/util/logger/test/logger.go
@@ -11,10 +11,9 @@ import (
// NewLogger creates a new logger.
func NewLogger(t testing.TB) *logger.Logger {
- return logger.NewLoggerWrapper(
- zaptest.NewLogger(t,
- zaptest.Level(zapcore.DebugLevel),
- zaptest.WrapOptions(zap.Development(), zap.AddCaller()),
- ),
- )
+ var l logger.Logger
+ l.Logger = zaptest.NewLogger(t,
+ zaptest.Level(zapcore.DebugLevel),
+ zaptest.WrapOptions(zap.Development(), zap.AddCaller()))
+ return &l
}
diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go
index a06296a07..97508f82a 100644
--- a/pkg/util/rand/rand.go
+++ b/pkg/util/rand/rand.go
@@ -13,7 +13,7 @@ func Uint64() uint64 {
return source.Uint64()
}
-// Uint32 returns a random uint32 value.
+// Uint32 returns a random uint32 value.
func Uint32() uint32 {
return source.Uint32()
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index bd15d0e8f..e94ff77ad 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -24,7 +24,7 @@ var (
errSocketIsNotInitialized = errors.New("socket is not initialized")
)
-// InitSocket initializes socket with provided name of
+// Initializes the socket with the name taken from the
// environment variable.
func InitSocket() error {
notifySocket := os.Getenv("NOTIFY_SOCKET")
@@ -59,8 +59,6 @@ func FlagAndStatus(status string) error {
return fmt.Errorf("clock_gettime: %w", err)
}
status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10)
- status += "\nSTATUS=RELOADING"
- return Send(status)
}
status += "\nSTATUS=" + strings.TrimSuffix(status, "=1")
return Send(status)
diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go
deleted file mode 100644
index 7373e538f..000000000
--- a/pkg/util/testing/netmap_source.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package testing
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-var (
- errInvalidDiff = errors.New("invalid diff")
- errNetmapNotFound = errors.New("netmap not found")
-)
-
-type TestNetmapSource struct {
- Netmaps map[uint64]*netmap.NetMap
- CurrentEpoch uint64
-}
-
-func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
- if diff >= s.CurrentEpoch {
- return nil, errInvalidDiff
- }
- return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
-}
-
-func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
- if nm, found := s.Netmaps[epoch]; found {
- return nm, nil
- }
- return nil, errNetmapNotFound
-}
-
-func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
- return s.CurrentEpoch, nil
-}
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
index 39a420358..d2004b673 100644
--- a/scripts/populate-metabase/internal/generate.go
+++ b/scripts/populate-metabase/internal/generate.go
@@ -1,10 +1,8 @@
package internal
import (
- cryptorand "crypto/rand"
"crypto/sha256"
"fmt"
- "math/rand"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -16,13 +14,14 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "golang.org/x/exp/rand"
)
func GeneratePayloadPool(count uint, size uint) [][]byte {
- var pool [][]byte
- for range count {
+ pool := [][]byte{}
+ for i := uint(0); i < count; i++ {
payload := make([]byte, size)
- _, _ = cryptorand.Read(payload)
+ _, _ = rand.Read(payload)
pool = append(pool, payload)
}
@@ -30,9 +29,9 @@ func GeneratePayloadPool(count uint, size uint) [][]byte {
}
func GenerateAttributePool(count uint) []objectSDK.Attribute {
- var pool []objectSDK.Attribute
- for i := range count {
- for j := range count {
+ pool := []objectSDK.Attribute{}
+ for i := uint(0); i < count; i++ {
+ for j := uint(0); j < count; j++ {
attr := *objectSDK.NewAttribute()
attr.SetKey(fmt.Sprintf("key%d", i))
attr.SetValue(fmt.Sprintf("value%d", j))
@@ -43,8 +42,8 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute {
}
func GenerateOwnerPool(count uint) []user.ID {
- var pool []user.ID
- for range count {
+ pool := []user.ID{}
+ for i := uint(0); i < count; i++ {
pool = append(pool, usertest.ID())
}
return pool
@@ -118,8 +117,8 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption {
func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
return func(obj *objectSDK.Object) {
- var attrs []objectSDK.Attribute
- for range count {
+ attrs := []objectSDK.Attribute{}
+ for i := uint(0); i < count; i++ {
attrs = append(attrs, pool[rand.Intn(len(pool))])
}
obj.SetAttributes(attrs...)
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
index fafe61eaa..390c1cdc0 100644
--- a/scripts/populate-metabase/internal/populate.go
+++ b/scripts/populate-metabase/internal/populate.go
@@ -29,12 +29,15 @@ func PopulateWithObjects(
) {
digits := "0123456789"
- for range count {
+ for i := uint(0); i < count; i++ {
obj := factory()
- id := fmt.Appendf(nil, "%c/%c/%c",
+
+ id := []byte(fmt.Sprintf(
+ "%c/%c/%c",
digits[rand.Int()%len(digits)],
digits[rand.Int()%len(digits)],
- digits[rand.Int()%len(digits)])
+ digits[rand.Int()%len(digits)],
+ ))
prm := meta.PutPrm{}
prm.SetObject(obj)
@@ -56,7 +59,7 @@ func PopulateWithBigObjects(
count uint,
factory func() *objectSDK.Object,
) {
- for range count {
+ for i := uint(0); i < count; i++ {
group.Go(func() error {
if err := populateWithBigObject(ctx, db, factory); err != nil {
return fmt.Errorf("couldn't put a big object: %w", err)
@@ -151,7 +154,7 @@ func PopulateGraveyard(
wg := &sync.WaitGroup{}
wg.Add(int(count))
- for range count {
+ for i := uint(0); i < count; i++ {
obj := factory()
prm := meta.PutPrm{}
@@ -223,7 +226,7 @@ func PopulateLocked(
wg := &sync.WaitGroup{}
wg.Add(int(count))
- for range count {
+ for i := uint(0); i < count; i++ {
defer wg.Done()
obj := factory()
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
index 8c4ea41ad..2bc7a5553 100644
--- a/scripts/populate-metabase/main.go
+++ b/scripts/populate-metabase/main.go
@@ -91,15 +91,15 @@ func populate() (err error) {
return fmt.Errorf("couldn't open the metabase: %w", err)
}
defer func() {
- if errOnClose := db.Close(ctx); errOnClose != nil {
+ if errOnClose := db.Close(); errOnClose != nil {
err = errors.Join(
err,
- fmt.Errorf("couldn't close the metabase: %w", db.Close(ctx)),
+ fmt.Errorf("couldn't close the metabase: %w", errOnClose),
)
}
}()
- if err = db.Init(ctx); err != nil {
+ if err = db.Init(); err != nil {
return fmt.Errorf("couldn't init the metabase: %w", err)
}
@@ -116,7 +116,7 @@ func populate() (err error) {
eg, ctx := errgroup.WithContext(ctx)
eg.SetLimit(int(jobs))
- for range numContainers {
+ for i := uint(0); i < numContainers; i++ {
cid := cidtest.ID()
for _, typ := range types {